Example source code for the Java class org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto
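
The snippets below, collected from several open-source Hadoop forks, show how StorageReportProto is read from datanode heartbeat requests and converted to and from the internal StorageReport class. As orientation, a report message can be assembled directly through the generated protobuf builder. The following is a minimal sketch, not taken from any of the projects below; the setters mirror the getters used in the snippets, the sample values are made up, and whether the UUID setter is named setStorageUuid or setStorageID depends on the fork's .proto definition.

// Minimal sketch: assembling a StorageReportProto with the generated builder.
// Setters mirror the getters used in the examples below; values are illustrative.
StorageReportProto report = StorageReportProto.newBuilder()
    .setStorageUuid("DS-example-uuid")        // older forks expose setStorageID instead
    .setFailed(false)
    .setCapacity(100L * 1024 * 1024 * 1024)
    .setDfsUsed(10L * 1024 * 1024 * 1024)
    .setRemaining(90L * 1024 * 1024 * 1024)
    .setBlockPoolUsed(10L * 1024 * 1024 * 1024)
    .build();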

Project: hadoop-plus    File: DatanodeProtocolServerSideTranslatorPB.java
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
    HeartbeatRequestProto request) throws ServiceException {
  HeartbeatResponse response;
  try {
    List<StorageReportProto> list = request.getReportsList();
    StorageReport[] report = new StorageReport[list.size()];
    int i = 0;
    for (StorageReportProto p : list) {
      report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
          p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
          p.getBlockPoolUsed());
    }
    response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
        report, request.getXmitsInProgress(), request.getXceiverCount(),
        request.getFailedVolumes());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
      .newBuilder();
  DatanodeCommand[] cmds = response.getCommands();
  if (cmds != null) {
    for (int i = 0; i < cmds.length; i++) {
      if (cmds[i] != null) {
        builder.addCmds(PBHelper.convert(cmds[i]));
      }
    }
  }
  builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
  return builder.build();
}
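
The method above is the server-side half of the heartbeat RPC: it unpacks the repeated reports field into a StorageReport[] before calling the NameNode implementation. A client-side translator performs the opposite packing. The sketch below is an assumption-laden illustration, not the verbatim Hadoop client code: it relies on the builder methods (addReports, setRegistration, setXmitsInProgress, setXceiverCount, setFailedVolumes) implied by the getters used above, and on a PBHelper.convert(StorageReport) overload like the ones shown further down.

// Sketch of the inverse (client-side) packing, under the assumptions stated above.
private static HeartbeatRequestProto buildHeartbeatRequest(
    DatanodeRegistration registration, StorageReport[] reports,
    int xmitsInProgress, int xceiverCount, int failedVolumes) {
  HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setXmitsInProgress(xmitsInProgress)
      .setXceiverCount(xceiverCount)
      .setFailedVolumes(failedVolumes);
  for (StorageReport r : reports) {
    builder.addReports(PBHelper.convert(r));   // StorageReport -> StorageReportProto
  }
  return builder.build();
}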
Project: hops    File: DatanodeProtocolServerSideTranslatorPB.java
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
    HeartbeatRequestProto request) throws ServiceException {
  HeartbeatResponse response;
  try {
    List<StorageReportProto> list = request.getReportsList();
    StorageReport[] report = new StorageReport[list.size()];
    int i = 0;
    for (StorageReportProto p : list) {
      report[i++] =
          new StorageReport(p.getStorageID(), p.getFailed(), p.getCapacity(),
              p.getDfsUsed(), p.getRemaining(), p.getBlockPoolUsed());
    }
    response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
        report, request.getXmitsInProgress(), request.getXceiverCount(),
        request.getFailedVolumes());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  HeartbeatResponseProto.Builder builder =
      HeartbeatResponseProto.newBuilder();
  DatanodeCommand[] cmds = response.getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd != null) {
        builder.addCmds(PBHelper.convert(cmd));
      }
    }
  }
  return builder.build();
}
Project: hadoop-TCP    File: DatanodeProtocolServerSideTranslatorPB.java
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
    HeartbeatRequestProto request) throws ServiceException {
  HeartbeatResponse response;
  try {
    List<StorageReportProto> list = request.getReportsList();
    StorageReport[] report = new StorageReport[list.size()];
    int i = 0;
    for (StorageReportProto p : list) {
      report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
          p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
          p.getBlockPoolUsed());
    }
    response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
        report, request.getXmitsInProgress(), request.getXceiverCount(),
        request.getFailedVolumes());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
      .newBuilder();
  DatanodeCommand[] cmds = response.getCommands();
  if (cmds != null) {
    for (int i = 0; i < cmds.length; i++) {
      if (cmds[i] != null) {
        builder.addCmds(PBHelper.convert(cmds[i]));
      }
    }
  }
  builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
  return builder.build();
}
Project: hardfs    File: DatanodeProtocolServerSideTranslatorPB.java
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
    HeartbeatRequestProto request) throws ServiceException {
  HeartbeatResponse response;
  try {
    List<StorageReportProto> list = request.getReportsList();
    StorageReport[] report = new StorageReport[list.size()];
    int i = 0;
    for (StorageReportProto p : list) {
      report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
          p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
          p.getBlockPoolUsed());
    }
    response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
        report, request.getXmitsInProgress(), request.getXceiverCount(),
        request.getFailedVolumes());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
      .newBuilder();
  DatanodeCommand[] cmds = response.getCommands();
  if (cmds != null) {
    for (int i = 0; i < cmds.length; i++) {
      if (cmds[i] != null) {
        builder.addCmds(PBHelper.convert(cmds[i]));
      }
    }
  }
  builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
  return builder.build();
}
Project: hadoop-on-lustre2    File: PBHelper.java
public static StorageReportProto convert(StorageReport r) {
  StorageReportProto.Builder builder = StorageReportProto.newBuilder()
      .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
      .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
      .setStorageUuid(r.getStorage().getStorageID())
      .setStorage(convert(r.getStorage()));
  return builder.build();
}
Project: hadoop-on-lustre2    File: PBHelper.java
public static StorageReport convert(StorageReportProto p) {
  return new StorageReport(
      p.hasStorage() ?
          convert(p.getStorage()) :
          new DatanodeStorage(p.getStorageUuid()),
      p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
      p.getBlockPoolUsed());
}
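
The decoder above tolerates older senders: if the optional storage message is absent, it falls back to wrapping the bare storageUuid in a DatanodeStorage. A minimal sketch of that fallback, with made-up values:

// Sketch: an old-style proto carrying only the storage UUID still converts,
// because PBHelper falls back to new DatanodeStorage(p.getStorageUuid()).
StorageReportProto legacy = StorageReportProto.newBuilder()
    .setStorageUuid("DS-legacy-uuid")          // note: no setStorage(...) call
    .setFailed(false)
    .setCapacity(1024L).setDfsUsed(512L)
    .setRemaining(512L).setBlockPoolUsed(256L)
    .build();
StorageReport r = PBHelper.convert(legacy);
assert "DS-legacy-uuid".equals(r.getStorage().getStorageID());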
Project: hadoop-on-lustre2    File: PBHelper.java
public static StorageReport[] convertStorageReports(
    List<StorageReportProto> list) {
  final StorageReport[] report = new StorageReport[list.size()];
  for (int i = 0; i < report.length; i++) {
    report[i] = convert(list.get(i));
  }
  return report;
}
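
convertStorageReports wraps the per-element conversion, so a heartbeat translator in this fork can delegate the repeated-field handling instead of repeating the hand-rolled loop from the earlier snippets. A minimal sketch of that use:

// Sketch: delegating the repeated-field conversion to the helper above.
private static StorageReport[] reportsFrom(HeartbeatRequestProto request) {
  return PBHelper.convertStorageReports(request.getReportsList());
}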
Project: hadoop-plus    File: PBHelper.java
public static StorageReportProto convert(StorageReport r) {
  return StorageReportProto.newBuilder()
      .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
      .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
      .setStorageID(r.getStorageID()).build();
}
Project: hops    File: PBHelper.java
public static StorageReportProto convert(StorageReport r) {
  return StorageReportProto.newBuilder()
      .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
      .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
      .setStorageID(r.getStorageID()).build();
}
Project: hadoop-TCP    File: PBHelper.java
public static StorageReportProto convert(StorageReport r) {
  return StorageReportProto.newBuilder()
      .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
      .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
      .setStorageID(r.getStorageID()).build();
}
Project: hardfs    File: PBHelper.java
public static StorageReportProto convert(StorageReport r) {
  return StorageReportProto.newBuilder()
      .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
      .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
      .setStorageID(r.getStorageID()).build();
}
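
These storageID-based forks have no StorageReportProto-to-StorageReport overload in PBHelper; the heartbeat translators at the top of the page perform that direction inline. The round-trip sketch below uses only the constructor and getters that appear in those snippets; values are illustrative, and note that convert(StorageReport) here never sets the failed flag, so it is not preserved across the round trip.

// Round-trip sketch for the storageID-based forks (illustrative values).
StorageReport original = new StorageReport("DS-example-id", false,
    1000L /* capacity */, 400L /* dfsUsed */, 600L /* remaining */, 200L /* blockPoolUsed */);
StorageReportProto proto = PBHelper.convert(original);

// The reverse direction mirrors the inline code in the sendHeartbeat translators above.
StorageReport decoded = new StorageReport(proto.getStorageID(), proto.getFailed(),
    proto.getCapacity(), proto.getDfsUsed(), proto.getRemaining(),
    proto.getBlockPoolUsed());
assert decoded.getStorageID().equals(original.getStorageID());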