Example source code for the Java class org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto

Project: hadoop    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return singleThreadExecutor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (!hasHttpServerEndPoint()) {
        // Force an RPC call so we know what the HTTP port should be if we
        // haven't determined it yet.
        GetJournalStateResponseProto ret = getProxy().getJournalState(
            journalId);
        constructHttpServerURI(ret);
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
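The method above schedules the RPC on the channel's single-thread executor and hands back a Guava ListenableFuture. As a hedged illustration (not taken from this page), a caller could attach a callback to that future roughly as follows; the channel and segmentTxId variables are hypothetical placeholders.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

// Hypothetical usage: 'channel' is an IPCLoggerChannel created elsewhere.
ListenableFuture<PrepareRecoveryResponseProto> future =
    channel.prepareRecovery(segmentTxId);
Futures.addCallback(future, new FutureCallback<PrepareRecoveryResponseProto>() {
  @Override
  public void onSuccess(PrepareRecoveryResponseProto resp) {
    // lastWriterEpoch is a required field of the response message.
    System.out.println("prepared recovery, lastWriterEpoch=" + resp.getLastWriterEpoch());
  }
  @Override
  public void onFailure(Throwable t) {
    // Any IOException thrown inside call() surfaces here.
    t.printStackTrace();
  }
}, MoreExecutors.directExecutor());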
Project: aliyun-oss-hadoop-fs    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return singleThreadExecutor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (!hasHttpServerEndPoint()) {
        // Force an RPC call so we know what the HTTP port should be if we
        // haven't determined it yet.
        GetJournalStateResponseProto ret = getProxy().getJournalState(
            journalId);
        constructHttpServerURI(ret);
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: big-c    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return singleThreadExecutor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (!hasHttpServerEndPoint()) {
        // Force an RPC call so we know what the HTTP port should be if we
        // haven't determined it yet.
        GetJournalStateResponseProto ret = getProxy().getJournalState(
            journalId);
        constructHttpServerURI(ret);
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hadoop-2.6.0-cdh5.4.3    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return singleThreadExecutor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (!hasHttpServerEndPoint()) {
        // Force an RPC call so we know what the HTTP port should be if we
        // haven't determined it yet.
        GetJournalStateResponseProto ret = getProxy().getJournalState(
            journalId);
        constructHttpServerURI(ret);
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: FlexMap    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return singleThreadExecutor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (!hasHttpServerEndPoint()) {
        // Force an RPC call so we know what the HTTP port should be if we
        // haven't determined it yet.
        GetJournalStateResponseProto ret = getProxy().getJournalState(
            journalId);
        constructHttpServerURI(ret);
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hadoop-on-lustre2    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (!hasHttpServerEndPoint()) {
        // Force an RPC call so we know what the HTTP port should be if we
        // haven't determined it yet.
        GetJournalStateResponseProto ret = getProxy().getJournalState(
            journalId);
        constructHttpServerURI(ret);
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hadoop    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
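In the surrounding Hadoop code, the QuorumCall returned here is normally awaited until a majority of journal nodes reply before segment recovery continues. A rough sketch of that caller-side pattern follows; the waitForWriteQuorum helper and the timeout variable are assumptions based on the upstream QuorumJournalManager, not code quoted from this page.

// Hypothetical caller-side sketch: wait for a quorum of prepareRecovery responses.
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto> prepare =
    loggers.prepareRecovery(segmentTxId);
Map<AsyncLogger, PrepareRecoveryResponseProto> prepareResponses =
    loggers.waitForWriteQuorum(prepare, prepareRecoveryTimeoutMs,
        "prepareRecovery(" + segmentTxId + ")");
// Each remaining entry maps a logger to the recovery state it reported.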
Project: hadoop    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
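The client-side translator above only wraps the request in a PrepareRecoveryRequestProto and returns the protobuf response unchanged. For reference, the response message can also be assembled directly through the generated builder; the field names below follow the QJournalProtocol.proto definition, while the concrete values are made up for illustration.

// Hypothetical construction of a response, e.g. inside a unit test.
SegmentStateProto seg = SegmentStateProto.newBuilder()
    .setStartTxId(1L)
    .setEndTxId(100L)
    .setIsInProgress(true)
    .build();
PrepareRecoveryResponseProto resp = PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(seg)
    .setLastWriterEpoch(3L)       // required field
    .setAcceptedInEpoch(4L)       // optional: a previously accepted recovery proposal
    .build();
assert resp.hasAcceptedInEpoch();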
Project: aliyun-oss-hadoop-fs    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: aliyun-oss-hadoop-fs    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: aliyun-oss-hadoop-fs    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: big-c    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: big-c    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: big-c    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: hadoop-2.6.0-cdh5.4.3    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: hadoop-EAR    File: TestSegmentRecoveryComparator.java
@Test
public void testComparisons() {
  PrepareRecoveryResponseProto p1 = makeSSPInstance(1L, 3L, true);
  p1.setLastWriterEpoch(0L);
  Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_3 =
      makeEntry(p1);

  PrepareRecoveryResponseProto p2 = makeSSPInstance(1L, 4L, true);
  p2.setLastWriterEpoch(0L);
  Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4 =
      makeEntry(p2);

  PrepareRecoveryResponseProto p3 = makeSSPInstance(1L, 4L, true);
  p3.setLastWriterEpoch(0L);
  p3.setAcceptedInEpoch(1L);    
  Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4_ACCEPTED =
      makeEntry(p3);

  PrepareRecoveryResponseProto p4 = makeSSPInstance(1L, 3L, false);
  p4.setLastWriterEpoch(0L);
  Entry<AsyncLogger, PrepareRecoveryResponseProto> FINALIZED_1_3 =
      makeEntry(p4);

  // Should compare equal to itself
  assertEquals(0, INSTANCE.compare(INPROGRESS_1_3, INPROGRESS_1_3));

  // Longer log wins.
  assertEquals(-1, INSTANCE.compare(INPROGRESS_1_3, INPROGRESS_1_4));
  assertEquals(1, INSTANCE.compare(INPROGRESS_1_4, INPROGRESS_1_3));

  // Finalized log wins even over a longer in-progress
  assertEquals(-1, INSTANCE.compare(INPROGRESS_1_4, FINALIZED_1_3));
  assertEquals(1, INSTANCE.compare(FINALIZED_1_3, INPROGRESS_1_4));

  // Finalized log wins even if the in-progress one has an accepted
  // recovery proposal.
  assertEquals(-1, INSTANCE.compare(INPROGRESS_1_4_ACCEPTED, FINALIZED_1_3));
  assertEquals(1, INSTANCE.compare(FINALIZED_1_3, INPROGRESS_1_4_ACCEPTED));
}
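During recovery, this comparator is what lets the coordinator pick the single best response out of the quorum's answers: finalized segments beat in-progress ones, and longer in-progress segments beat shorter ones. A minimal sketch of that selection, assuming a prepareResponses map like the one produced by the quorum wait shown earlier:

// Hypothetical: pick the logger whose reported segment should drive recovery.
Entry<AsyncLogger, PrepareRecoveryResponseProto> bestEntry =
    Collections.max(prepareResponses.entrySet(), SegmentRecoveryComparator.INSTANCE);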
Project: hadoop-EAR    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: hadoop-EAR    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (httpPort < 0) {
        // If the HTTP port hasn't been set yet, force an RPC call so we know
        // what the HTTP port should be.
        httpPort = getProxy().getJournalState(journalIdBytes).getHttpPort();
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hadoop-EAR    File: Journal.java
/**
 * @see QJournalProtocol#prepareRecovery(RequestInfo, long)
 */
public synchronized PrepareRecoveryResponseProto prepareRecovery(
    RequestInfo reqInfo, long segmentTxId) throws IOException {
  checkJournalStorageFormatted();
  checkRequest(reqInfo);

  abortCurSegment();

  PrepareRecoveryResponseProto ret = new PrepareRecoveryResponseProto();

  PersistedRecoveryPaxosData previouslyAccepted = getPersistedPaxosData(segmentTxId);
  completeHalfDoneAcceptRecovery(previouslyAccepted);

  SegmentStateProto segInfo = getSegmentInfo(segmentTxId);
  boolean hasFinalizedSegment = segInfo != null && !segInfo.getIsInProgress();

  if (previouslyAccepted != null && !hasFinalizedSegment) {
    ret.setAcceptedInEpoch(previouslyAccepted.getAcceptedInEpoch());   
    ret.setSegmentState(previouslyAccepted.getSegmentState());
  } else {
    if (segInfo != null) {
      ret.setSegmentState(segInfo);
    }
  }

  ret.setLastWriterEpoch(lastWriterEpoch.get());
  if (committedTxnId.get() != HdfsConstants.INVALID_TXID) {
    ret.setLastCommittedTxId(committedTxnId.get());
  }

  LOG.info("Prepared recovery for segment " + segmentTxId + ": " + ret);
  return ret;
}
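The hadoop-EAR variant above fills in a plain response object through setters. In the protobuf-based branches listed elsewhere on this page, the same decision logic would be expressed with the generated builder; a hedged sketch of that shape, reusing the local variables from the method above, is:

// Hypothetical builder-based equivalent of the setter calls above.
PrepareRecoveryResponseProto.Builder builder =
    PrepareRecoveryResponseProto.newBuilder()
        .setLastWriterEpoch(lastWriterEpoch.get());
if (previouslyAccepted != null && !hasFinalizedSegment) {
  builder.setAcceptedInEpoch(previouslyAccepted.getAcceptedInEpoch())
         .setSegmentState(previouslyAccepted.getSegmentState());
} else if (segInfo != null) {
  builder.setSegmentState(segInfo);
}
PrepareRecoveryResponseProto ret = builder.build();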
Project: hadoop-plus    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: hadoop-plus    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (httpPort < 0) {
        // If the HTTP port hasn't been set yet, force an RPC call so we know
        // what the HTTP port should be.
        httpPort = getProxy().getJournalState(journalId).getHttpPort();
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hadoop-plus    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop-plus    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: FlexMap    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: FlexMap    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: FlexMap    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: hadoop-TCP    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: hadoop-TCP    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (httpPort < 0) {
        // If the HTTP port hasn't been set yet, force an RPC call so we know
        // what the HTTP port should be.
        httpPort = getProxy().getJournalState(journalId).getHttpPort();
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hadoop-TCP    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop-TCP    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: hardfs    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: hardfs    File: IPCLoggerChannel.java
@Override
public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery(
    final long segmentTxId) {
  return executor.submit(new Callable<PrepareRecoveryResponseProto>() {
    @Override
    public PrepareRecoveryResponseProto call() throws IOException {
      if (httpPort < 0) {
        // If the HTTP port hasn't been set yet, force an RPC call so we know
        // what the HTTP port should be.
        httpPort = getProxy().getJournalState(journalId).getHttpPort();
      }
      return getProxy().prepareRecovery(createReqInfo(), segmentTxId);
    }
  });
}
Project: hardfs    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hardfs    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Project: hadoop-on-lustre2    File: AsyncLoggerSet.java
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
    prepareRecovery(long segmentTxId) {
  Map<AsyncLogger,
    ListenableFuture<PrepareRecoveryResponseProto>> calls
    = Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<PrepareRecoveryResponseProto> future =
        logger.prepareRecovery(segmentTxId);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Project: hadoop-on-lustre2    File: QJournalProtocolServerSideTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RpcController controller,
    PrepareRecoveryRequestProto request) throws ServiceException {
  try {
    return impl.prepareRecovery(convert(request.getReqInfo()),
        request.getSegmentTxId());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop-on-lustre2    File: QJournalProtocolTranslatorPB.java
@Override
public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo,
    long segmentTxId) throws IOException {
  try {
    return rpcProxy.prepareRecovery(NULL_CONTROLLER,
        PrepareRecoveryRequestProto.newBuilder()
          .setReqInfo(convert(reqInfo))
          .setSegmentTxId(segmentTxId)
          .build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}