Java 类org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto 实例源码

项目:hadoop    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:hadoop    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Unpack the replacement pipeline: new datanodes plus their storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeID[] newNodes = PBHelper.convert(
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]));
    final List<String> storages = req.getStorageIDsList();
    final String[] storageIDs = storages.toArray(new String[storages.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelper.convert(req.getOldBlock()),
        PBHelper.convert(req.getNewBlock()),
        newNodes, storageIDs);
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:aliyun-oss-hadoop-fs    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelperClient.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelperClient.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:aliyun-oss-hadoop-fs    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Unpack the replacement pipeline: new datanodes plus their storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeID[] newNodes = PBHelperClient.convert(
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]));
    final List<String> storages = req.getStorageIDsList();
    final String[] storageIDs = storages.toArray(new String[storages.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelperClient.convert(req.getOldBlock()),
        PBHelperClient.convert(req.getNewBlock()),
        newNodes, storageIDs);
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:big-c    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:big-c    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Unpack the replacement pipeline: new datanodes plus their storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeID[] newNodes = PBHelper.convert(
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]));
    final List<String> storages = req.getStorageIDsList();
    final String[] storageIDs = storages.toArray(new String[storages.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelper.convert(req.getOldBlock()),
        PBHelper.convert(req.getNewBlock()),
        newNodes, storageIDs);
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Unpack the replacement pipeline: new datanodes plus their storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeID[] newNodes = PBHelper.convert(
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]));
    final List<String> storages = req.getStorageIDsList();
    final String[] storageIDs = storages.toArray(new String[storages.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelper.convert(req.getOldBlock()),
        PBHelper.convert(req.getNewBlock()),
        newNodes, storageIDs);
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:hadoop-plus    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:FlexMap    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:FlexMap    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Unpack the replacement pipeline: new datanodes plus their storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeID[] newNodes = PBHelper.convert(
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]));
    final List<String> storages = req.getStorageIDsList();
    final String[] storageIDs = storages.toArray(new String[storages.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelper.convert(req.getOldBlock()),
        PBHelper.convert(req.getNewBlock()),
        newNodes, storageIDs);
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:hops    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:hadoop-TCP    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:hardfs    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:hadoop-on-lustre2    文件:DatanodeProtocolServerSideTranslatorPB.java   
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
    RpcController controller, CommitBlockSynchronizationRequestProto request)
    throws ServiceException {
  // Translate the recovery targets out of protobuf form. NOTE: the
  // "Taragets" spelling mirrors the field name in the .proto definition,
  // so the generated accessor must be used as-is.
  final List<DatanodeIDProto> targetProtos = request.getNewTaragetsList();
  final DatanodeID[] targets = new DatanodeID[targetProtos.size()];
  int idx = 0;
  for (DatanodeIDProto proto : targetProtos) {
    targets[idx++] = PBHelper.convert(proto);
  }
  // Storage IDs are transmitted as plain strings and need no conversion.
  final List<String> storageList = request.getNewTargetStoragesList();
  final String[] storageIDs = storageList.toArray(new String[storageList.size()]);
  try {
    // Delegate to the server-side implementation; wrap any I/O failure in
    // a ServiceException as the protobuf RPC layer requires.
    impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
        request.getNewGenStamp(), request.getNewLength(),
        request.getCloseFile(), request.getDeleteBlock(),
        targets, storageIDs);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
项目:hadoop-on-lustre2    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Unpack the replacement pipeline: new datanodes plus their storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeID[] newNodes = PBHelper.convert(
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]));
    final List<String> storages = req.getStorageIDsList();
    final String[] storageIDs = storages.toArray(new String[storages.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelper.convert(req.getOldBlock()),
        PBHelper.convert(req.getNewBlock()),
        newNodes, storageIDs);
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:hadoop    文件:PBHelper.java   
public static DatanodeIDProto convert(DatanodeID dn) {
  // For wire compatibility with older versions we transmit the StorageID,
  // which is the same as the DatanodeUuid. StorageID is a required field,
  // so an empty string stands in when the DatanodeUuid is not yet known.
  final String uuid = dn.getDatanodeUuid();
  DatanodeIDProto.Builder builder = DatanodeIDProto.newBuilder();
  builder.setIpAddr(dn.getIpAddr());
  builder.setHostName(dn.getHostName());
  builder.setXferPort(dn.getXferPort());
  builder.setDatanodeUuid(uuid != null ? uuid : "");
  builder.setInfoPort(dn.getInfoPort());
  builder.setInfoSecurePort(dn.getInfoSecurePort());
  builder.setIpcPort(dn.getIpcPort());
  return builder.build();
}
项目:hadoop    文件:PBHelper.java   
public static DatanodeIDProto[] convert(DatanodeID[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeIDProto[] protos = new DatanodeIDProto[did.length];
  int idx = 0;
  for (DatanodeID id : did) {
    protos[idx++] = convert(id);
  }
  return protos;
}
项目:hadoop    文件:PBHelper.java   
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeID[] ids = new DatanodeID[did.length];
  int idx = 0;
  for (DatanodeIDProto proto : did) {
    ids[idx++] = convert(proto);
  }
  return ids;
}
项目:hadoop    文件:TestPBHelper.java   
@Test
public void testConvertDatanodeID() {
  // Round-trip a local DatanodeID through its protobuf representation and
  // verify the result matches the original field for field.
  DatanodeID original = DFSTestUtil.getLocalDatanodeID();
  DatanodeID roundTripped = PBHelper.convert(PBHelper.convert(original));
  compare(original, roundTripped);
}
项目:aliyun-oss-hadoop-fs    文件:PBHelperClient.java   
public static DatanodeIDProto convert(DatanodeID dn) {
  // For wire compatibility with older versions we transmit the StorageID,
  // which is the same as the DatanodeUuid. StorageID is a required field,
  // so an empty string stands in when the DatanodeUuid is not yet known.
  final String uuid = dn.getDatanodeUuid();
  DatanodeIDProto.Builder builder = DatanodeIDProto.newBuilder();
  builder.setIpAddr(dn.getIpAddr());
  builder.setHostName(dn.getHostName());
  builder.setXferPort(dn.getXferPort());
  builder.setDatanodeUuid(uuid != null ? uuid : "");
  builder.setInfoPort(dn.getInfoPort());
  builder.setInfoSecurePort(dn.getInfoSecurePort());
  builder.setIpcPort(dn.getIpcPort());
  return builder.build();
}
项目:aliyun-oss-hadoop-fs    文件:PBHelperClient.java   
public static DatanodeIDProto[] convert(DatanodeID[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeIDProto[] protos = new DatanodeIDProto[did.length];
  int idx = 0;
  for (DatanodeID id : did) {
    protos[idx++] = convert(id);
  }
  return protos;
}
项目:aliyun-oss-hadoop-fs    文件:PBHelperClient.java   
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeID[] ids = new DatanodeID[did.length];
  int idx = 0;
  for (DatanodeIDProto proto : did) {
    ids[idx++] = convert(proto);
  }
  return ids;
}
项目:aliyun-oss-hadoop-fs    文件:TestPBHelper.java   
@Test
public void testConvertDatanodeID() {
  // Round-trip a local DatanodeID through its protobuf representation and
  // verify the result matches the original field for field.
  DatanodeID original = DFSTestUtil.getLocalDatanodeID();
  DatanodeID roundTripped =
      PBHelperClient.convert(PBHelperClient.convert(original));
  compare(original, roundTripped);
}
项目:big-c    文件:PBHelper.java   
public static DatanodeIDProto convert(DatanodeID dn) {
  // For wire compatibility with older versions we transmit the StorageID,
  // which is the same as the DatanodeUuid. StorageID is a required field,
  // so an empty string stands in when the DatanodeUuid is not yet known.
  final String uuid = dn.getDatanodeUuid();
  DatanodeIDProto.Builder builder = DatanodeIDProto.newBuilder();
  builder.setIpAddr(dn.getIpAddr());
  builder.setHostName(dn.getHostName());
  builder.setXferPort(dn.getXferPort());
  builder.setDatanodeUuid(uuid != null ? uuid : "");
  builder.setInfoPort(dn.getInfoPort());
  builder.setInfoSecurePort(dn.getInfoSecurePort());
  builder.setIpcPort(dn.getIpcPort());
  return builder.build();
}
项目:big-c    文件:PBHelper.java   
public static DatanodeIDProto[] convert(DatanodeID[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeIDProto[] protos = new DatanodeIDProto[did.length];
  int idx = 0;
  for (DatanodeID id : did) {
    protos[idx++] = convert(id);
  }
  return protos;
}
项目:big-c    文件:PBHelper.java   
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeID[] ids = new DatanodeID[did.length];
  int idx = 0;
  for (DatanodeIDProto proto : did) {
    ids[idx++] = convert(proto);
  }
  return ids;
}
项目:big-c    文件:TestPBHelper.java   
@Test
public void testConvertDatanodeID() {
  // Round-trip a local DatanodeID through its protobuf representation and
  // verify the result matches the original field for field.
  DatanodeID original = DFSTestUtil.getLocalDatanodeID();
  DatanodeID roundTripped = PBHelper.convert(PBHelper.convert(original));
  compare(original, roundTripped);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBHelper.java   
public static DatanodeIDProto convert(DatanodeID dn) {
  // For wire compatibility with older versions we transmit the StorageID,
  // which is the same as the DatanodeUuid. StorageID is a required field,
  // so an empty string stands in when the DatanodeUuid is not yet known.
  final String uuid = dn.getDatanodeUuid();
  DatanodeIDProto.Builder builder = DatanodeIDProto.newBuilder();
  builder.setIpAddr(dn.getIpAddr());
  builder.setHostName(dn.getHostName());
  builder.setXferPort(dn.getXferPort());
  builder.setDatanodeUuid(uuid != null ? uuid : "");
  builder.setInfoPort(dn.getInfoPort());
  builder.setInfoSecurePort(dn.getInfoSecurePort());
  builder.setIpcPort(dn.getIpcPort());
  return builder.build();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBHelper.java   
public static DatanodeIDProto[] convert(DatanodeID[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeIDProto[] protos = new DatanodeIDProto[did.length];
  int idx = 0;
  for (DatanodeID id : did) {
    protos[idx++] = convert(id);
  }
  return protos;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBHelper.java   
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeID[] ids = new DatanodeID[did.length];
  int idx = 0;
  for (DatanodeIDProto proto : did) {
    ids[idx++] = convert(proto);
  }
  return ids;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestPBHelper.java   
@Test
public void testConvertDatanodeID() {
  // Round-trip a local DatanodeID through its protobuf representation and
  // verify the result matches the original field for field.
  DatanodeID original = DFSTestUtil.getLocalDatanodeID();
  DatanodeID roundTripped = PBHelper.convert(PBHelper.convert(original));
  compare(original, roundTripped);
}
项目:hadoop-plus    文件:PBHelper.java   
public static DatanodeIDProto convert(DatanodeID dn) {
  // Build the wire representation of a DatanodeID field by field.
  // This older variant still transmits an explicit StorageID.
  DatanodeIDProto.Builder builder = DatanodeIDProto.newBuilder();
  builder.setIpAddr(dn.getIpAddr());
  builder.setHostName(dn.getHostName());
  builder.setStorageID(dn.getStorageID());
  builder.setXferPort(dn.getXferPort());
  builder.setInfoPort(dn.getInfoPort());
  builder.setIpcPort(dn.getIpcPort());
  return builder.build();
}
项目:hadoop-plus    文件:PBHelper.java   
public static DatanodeIDProto[] convert(DatanodeID[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeIDProto[] protos = new DatanodeIDProto[did.length];
  int idx = 0;
  for (DatanodeID id : did) {
    protos[idx++] = convert(id);
  }
  return protos;
}
项目:hadoop-plus    文件:PBHelper.java   
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeID[] ids = new DatanodeID[did.length];
  int idx = 0;
  for (DatanodeIDProto proto : did) {
    ids[idx++] = convert(proto);
  }
  return ids;
}
项目:hadoop-plus    文件:ClientNamenodeProtocolServerSideTranslatorPB.java   
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
    UpdatePipelineRequestProto req) throws ServiceException {
  try {
    // Convert the replacement datanode list out of protobuf form.
    // This older variant carries no per-node storage IDs.
    final List<DatanodeIDProto> nodeProtos = req.getNewNodesList();
    final DatanodeIDProto[] nodeArray =
        nodeProtos.toArray(new DatanodeIDProto[nodeProtos.size()]);
    server.updatePipeline(req.getClientName(),
        PBHelper.convert(req.getOldBlock()),
        PBHelper.convert(req.getNewBlock()),
        PBHelper.convert(nodeArray));
    return VOID_UPDATEPIPELINE_RESPONSE;
  } catch (IOException e) {
    // Surface I/O failures through the protobuf RPC exception contract.
    throw new ServiceException(e);
  }
}
项目:hadoop-plus    文件:TestPBHelper.java   
@Test
public void testConvertDatanodeID() {
  // Round-trip a local DatanodeID through its protobuf representation and
  // verify the result matches the original field for field.
  DatanodeID original = DFSTestUtil.getLocalDatanodeID();
  DatanodeID roundTripped = PBHelper.convert(PBHelper.convert(original));
  compare(original, roundTripped);
}
项目:FlexMap    文件:PBHelper.java   
public static DatanodeIDProto convert(DatanodeID dn) {
  // For wire compatibility with older versions we transmit the StorageID,
  // which is the same as the DatanodeUuid. StorageID is a required field,
  // so an empty string stands in when the DatanodeUuid is not yet known.
  final String uuid = dn.getDatanodeUuid();
  DatanodeIDProto.Builder builder = DatanodeIDProto.newBuilder();
  builder.setIpAddr(dn.getIpAddr());
  builder.setHostName(dn.getHostName());
  builder.setXferPort(dn.getXferPort());
  builder.setDatanodeUuid(uuid != null ? uuid : "");
  builder.setInfoPort(dn.getInfoPort());
  builder.setInfoSecurePort(dn.getInfoSecurePort());
  builder.setIpcPort(dn.getIpcPort());
  return builder.build();
}
项目:FlexMap    文件:PBHelper.java   
public static DatanodeIDProto[] convert(DatanodeID[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeIDProto[] protos = new DatanodeIDProto[did.length];
  int idx = 0;
  for (DatanodeID id : did) {
    protos[idx++] = convert(id);
  }
  return protos;
}
项目:FlexMap    文件:PBHelper.java   
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  // Null maps to null so callers may pass through an absent array.
  if (did == null) {
    return null;
  }
  DatanodeID[] ids = new DatanodeID[did.length];
  int idx = 0;
  for (DatanodeIDProto proto : did) {
    ids[idx++] = convert(proto);
  }
  return ids;
}