Java Class org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto Example Source Code
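
HdfsProtos.BlockProto is the Protocol Buffers message HDFS uses to describe a block on the wire. As the snippets below show, it carries three fields: the block ID (blockId), the generation stamp (genStamp), and the block length in bytes (numBytes). Before the per-project usages, here is a minimal, self-contained sketch of building and round-tripping a BlockProto through the standard protobuf-generated API (field values are made up for illustration):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

public class BlockProtoExample {
  public static void main(String[] args) throws Exception {
    // Build a BlockProto with the three fields used throughout the
    // snippets below (illustrative values only).
    BlockProto proto = BlockProto.newBuilder()
        .setBlockId(1L)       // block identifier
        .setGenStamp(1001L)   // generation stamp
        .setNumBytes(4096L)   // block length in bytes
        .build();

    // Protobuf messages serialize to a compact byte array and parse back.
    byte[] wire = proto.toByteArray();
    BlockProto parsed = BlockProto.parseFrom(wire);
    System.out.println(parsed.getBlockId() == proto.getBlockId()); // true
  }
}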

Project: hadoop    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hadoop    File: PBImageXmlWriter.java
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("perferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));

  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
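
For orientation: assuming the o(name, value) helper emits one <name>value</name> element per call (as its chained usage above suggests), the <blocks> section for a file with a single block would be rendered roughly as follows (values are illustrative):

<blocks><block><id>1</id><genstamp>1001</genstamp><numBytes>4096</numBytes></block>
</blocks>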
Project: aliyun-oss-hadoop-fs    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: aliyun-oss-hadoop-fs    File: PBImageXmlWriter.java
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("preferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));
  dumpAcls(f.getAcl());
  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Project: big-c    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: big-c    File: PBImageXmlWriter.java
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("perferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));

  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBImageXmlWriter.java
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("perferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));

  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Project: hadoop-plus    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hadoop-plus    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Project: FlexMap    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: FlexMap    File: PBImageXmlWriter.java
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("perferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));

  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Project: hops    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Project: hadoop-TCP    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hadoop-TCP    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Project: hardfs    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hardfs    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
Project: hadoop-on-lustre2    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
      .newBuilder().setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hadoop-on-lustre2    File: PBImageXmlWriter.java
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("perferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));

  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
Project: hadoop    File: FileDistributionCalculator.java
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
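
The bucket index computed above is ceil(fileSize / steps), with files larger than maxSize falling into the last bucket. A quick worked example with hypothetical values (steps and maxSize are configured elsewhere in FileDistributionCalculator):

// Hypothetical configuration values, for illustration only.
long steps = 2L * 1024 * 1024;        // bucket width: 2 MB
long maxSize = 128L * 1024 * 1024;    // cap: 128 MB
long fileSize = 5L * 1024 * 1024;     // a 5 MB file
// ceil(5 MB / 2 MB) = 3, so this file lands in bucket 3.
int bucket = (int) Math.ceil((double) fileSize / steps);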
Project: hadoop    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: aliyun-oss-hadoop-fs    File: FileDistributionCalculator.java
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelperClient.convert(b);
  Block b2 = PBHelperClient.convert(bProto);
  assertEquals(b, b2);
}
Project: big-c    File: FileDistributionCalculator.java
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
Project: big-c    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileDistributionCalculator.java
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hadoop-plus    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: FlexMap    File: FileDistributionCalculator.java
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
Project: FlexMap    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hops    File: InterDatanodeProtocolTranslatorPB.java
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  InitReplicaRecoveryRequestProto req =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock)).build();
  InitReplicaRecoveryResponseProto resp;
  try {
    resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  if (!resp.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  } else {
    if (!resp.hasBlock() || !resp.hasState()) {
      throw new IOException("Replica was found but missing fields. " +
          "Req: " + req + "\n" +
          "Resp: " + resp);
    }
  }

  BlockProto b = resp.getBlock();
  return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
      b.getGenStamp(), PBHelper.convert(resp.getState()));
}
Project: hops    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hadoop-TCP    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hardfs    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hadoop-on-lustre2    File: PBHelper.java
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }

  List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
  String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
  for(int i = 0; i < targetStorageIDs.length; i++) {
    List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
    targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
  }

  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  default:
    throw new AssertionError("Unknown action type: " + blkCmd.getAction());
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets,
      targetStorageIDs);
}
Project: hadoop-on-lustre2    File: FileDistributionCalculator.java
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
Project: hadoop-on-lustre2    File: LsrPBImage.java
private long getFileSize(INodeFile f) {
  long size = 0;
  for (BlockProto p : f.getBlocksList()) {
    size += p.getNumBytes();
  }
  return size;
}
Project: hadoop-on-lustre2    File: TestPBHelper.java
@Test
public void testConvertBlock() {
  Block b = new Block(1, 100, 3);
  BlockProto bProto = PBHelper.convert(b);
  Block b2 = PBHelper.convert(bProto);
  assertEquals(b, b2);
}
Project: hadoop    File: PBHelper.java
public static BlockProto convert(Block b) {
  return BlockProto.newBuilder().setBlockId(b.getBlockId())
      .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
      .build();
}
Project: hadoop    File: PBHelper.java
public static Block convert(BlockProto b) {
  return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
}
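
Taken together, these last two helpers form the round trip exercised by the testConvertBlock snippets above: a Block converted to a BlockProto and back compares equal to the original. A short usage sketch:

// Mirrors testConvertBlock: (blockId, numBytes, genStamp) = (1, 100, 3).
Block original = new Block(1, 100, 3);
Block roundTripped = PBHelper.convert(PBHelper.convert(original));
assert original.equals(roundTripped);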