Example source code for the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto

Project: hadoop    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
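
For reference, here is a minimal self-contained sketch of building the same header directly through the generated protobuf builders, without the DataTransferProtoUtil helper. The pool id, block id, generation stamp, and client name below are hypothetical values, and the optional block token is omitted:

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public class ClientHeaderSketch {
  public static void main(String[] args) {
    // Identify the block the operation applies to (all values are made up).
    ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
        .setPoolId("BP-1-127.0.0.1-1")
        .setBlockId(1073741825L)
        .setGenerationStamp(1001L)
        .build();
    // Wrap the block in the base header (the block token is optional and
    // omitted here), then attach the client name.
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(block))
        .setClientName("DFSClient_sketch")
        .build();
    System.out.println(header.getClientName() + " -> block "
        + header.getBaseHeader().getBlock().getBlockId());
  }
}
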
Project: aliyun-oss-hadoop-fs    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  return ClientOperationHeaderProto.newBuilder()
    .setBaseHeader(buildBaseHeader(blk, blockToken))
    .setClientName(client)
    .build();
}
Project: big-c    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: hadoop-plus    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .addAllTargets(PBHelper.convert(targets, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto);

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
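
The send(out, Op.WRITE_BLOCK, proto.build()) call above frames the request on the DataTransferProtocol stream. A hedged sketch of that framing follows; it mirrors the Sender#op/send pattern rather than quoting it, and the version and opcode constants are assumptions taken from the Hadoop 2.x branches listed here (verify them against your version):

import java.io.DataOutputStream;
import java.io.IOException;
import com.google.protobuf.Message;

class WireFramingSketch {
  // Assumed constants: DataTransferProtocol.DATA_TRANSFER_VERSION is 28 and
  // Op.WRITE_BLOCK is 80 in these branches.
  static final short DATA_TRANSFER_VERSION = 28;
  static final byte OP_WRITE_BLOCK = 80;

  static void send(DataOutputStream out, byte opcode, Message proto)
      throws IOException {
    out.writeShort(DATA_TRANSFER_VERSION); // protocol version first
    out.writeByte(opcode);                 // one-byte operation code
    proto.writeDelimitedTo(out);           // varint length prefix + message bytes
    out.flush();
  }
}
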
Project: hadoop-plus    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: FlexMap    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: hops    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken, final String clientName,
    final DatanodeInfo[] targets, final DatanodeInfo source,
    final BlockConstructionStage stage, final int pipelineSize,
    final long minBytesRcvd, final long maxBytesRcvd,
    final long latestGenerationStamp, DataChecksum requestedChecksum)
    throws IOException {
  ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);

  ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto =
      OpWriteBlockProto.newBuilder().setHeader(header)
          .addAllTargets(PBHelper.convert(targets, 1))
          .setStage(toProto(stage)).setPipelineSize(pipelineSize)
          .setMinBytesRcvd(minBytesRcvd).setMaxBytesRcvd(maxBytesRcvd)
          .setLatestGenerationStamp(latestGenerationStamp)
          .setRequestedChecksum(checksumProto);

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: hops    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken)).setClientName(client)
      .build();
  return header;
}
Project: hadoop-TCP    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .addAllTargets(PBHelper.convert(targets, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: hadoop-TCP    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: hardfs    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .addAllTargets(PBHelper.convert(targets, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: hardfs    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: hadoop-on-lustre2    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .addAllTargets(PBHelper.convert(targets, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: hadoop-on-lustre2    File: DataTransferProtoUtil.java
static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk,
    String client, Token<BlockTokenIdentifier> blockToken) {
  ClientOperationHeaderProto header =
    ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(buildBaseHeader(blk, blockToken))
      .setClientName(client)
      .build();
  return header;
}
Project: hadoop    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType, 
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes, 
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .setStorageType(PBHelper.convertStorageType(storageType))
    .addAllTargets(PBHelper.convert(targets, 1))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .setAllowLazyPersist(allowLazyPersist)
    .setPinning(pinning)
    .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
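
On the receiving side, the datanode parses the same delimited message back and extracts the ClientOperationHeaderProto from it. A minimal sketch, assuming the version short and the opcode byte have already been consumed from the stream (as they are before Receiver#opWriteBlock runs):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

class ReceiveSketch {
  // Parse a varint-delimited OpWriteBlockProto and return its client header.
  static ClientOperationHeaderProto readWriteBlockHeader(InputStream in)
      throws IOException {
    OpWriteBlockProto proto = OpWriteBlockProto.parseDelimitedFrom(in);
    if (proto == null) {
      throw new IOException("Premature EOF while reading OpWriteBlockProto");
    }
    return proto.getHeader();
  }
}
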
Project: hadoop    File: DataTransferProtoUtil.java
public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
    String description) {
  return continueTraceSpan(header.getBaseHeader(), description);
}
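
continueTraceSpan works off the optional trace info carried in the base header. A hedged sketch of reading those fields, assuming the DataTransferTraceInfoProto shape (required traceId and parentId) used by these branches:

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;

class TraceInfoSketch {
  // Returns a printable form of the trace linkage, or null when the sender
  // attached no trace info (the field is optional).
  static String describeTrace(BaseHeaderProto base) {
    if (!base.hasTraceInfo()) {
      return null;
    }
    DataTransferTraceInfoProto t = base.getTraceInfo();
    return "traceId=" + t.getTraceId() + ", parentId=" + t.getParentId();
  }
}
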
Project: aliyun-oss-hadoop-fs    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
      .setHeader(header)
      .setStorageType(PBHelperClient.convertStorageType(storageType))
      .addAllTargets(PBHelperClient.convert(targets, 1))
      .addAllTargetStorageTypes(
          PBHelperClient.convertStorageTypes(targetStorageTypes, 1))
      .setStage(toProto(stage))
      .setPipelineSize(pipelineSize)
      .setMinBytesRcvd(minBytesRcvd)
      .setMaxBytesRcvd(maxBytesRcvd)
      .setLatestGenerationStamp(latestGenerationStamp)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(getCachingStrategy(cachingStrategy))
      .setAllowLazyPersist(allowLazyPersist)
      .setPinning(pinning)
      .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));

  if (source != null) {
    proto.setSource(PBHelperClient.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: aliyun-oss-hadoop-fs    File: Receiver.java
private TraceScope continueTraceSpan(ClientOperationHeaderProto header,
    String description) {
  return continueTraceSpan(header.getBaseHeader(), description);
}
Project: big-c    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType, 
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes, 
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .setStorageType(PBHelper.convertStorageType(storageType))
    .addAllTargets(PBHelper.convert(targets, 1))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .setAllowLazyPersist(allowLazyPersist)
    .setPinning(pinning)
    .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: big-c    File: DataTransferProtoUtil.java
public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
    String description) {
  return continueTraceSpan(header.getBaseHeader(), description);
}
Project: hadoop-2.6.0-cdh5.4.3    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType, 
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes, 
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .setStorageType(PBHelper.convertStorageType(storageType))
    .addAllTargets(PBHelper.convert(targets, 1))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .setAllowLazyPersist(allowLazyPersist);

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: hadoop-2.6.0-cdh5.4.3    File: DataTransferProtoUtil.java
public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
    String description) {
  return continueTraceSpan(header.getBaseHeader(), description);
}
Project: FlexMap    File: Sender.java
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType, 
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes, 
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist) throws IOException {
  ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
      blk, clientName, blockToken);

  ChecksumProto checksumProto =
    DataTransferProtoUtil.toProto(requestedChecksum);

  OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder()
    .setHeader(header)
    .setStorageType(PBHelper.convertStorageType(storageType))
    .addAllTargets(PBHelper.convert(targets, 1))
    .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1))
    .setStage(toProto(stage))
    .setPipelineSize(pipelineSize)
    .setMinBytesRcvd(minBytesRcvd)
    .setMaxBytesRcvd(maxBytesRcvd)
    .setLatestGenerationStamp(latestGenerationStamp)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .setAllowLazyPersist(allowLazyPersist);

  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
Project: FlexMap    File: DataTransferProtoUtil.java
public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
    String description) {
  return continueTraceSpan(header.getBaseHeader(), description);
}
Project: hbase    File: FanOutOneBlockAsyncDFSOutputHelper.java
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
    String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
    BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass) {
  Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
  DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
  boolean connectToDnViaHostname =
      conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
  ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
  blockCopy.setNumBytes(locatedBlock.getBlockSize());
  ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
          .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
      .setClientName(clientName).build();
  ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
  OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
      .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
      .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
      .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
  List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
  for (int i = 0; i < datanodeInfos.length; i++) {
    DatanodeInfo dnInfo = datanodeInfos[i];
    Enum<?> storageType = storageTypes[i];
    Promise<Channel> promise = eventLoopGroup.next().newPromise();
    futureList.add(promise);
    String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
    new Bootstrap().group(eventLoopGroup).channel(channelClass)
        .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

          @Override
          protected void initChannel(Channel ch) throws Exception {
            // We need the remote address of the channel, so we can only move on
            // after the channel has connected. Leave an empty implementation here
            // because netty does not allow a null handler.
          }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

          @Override
          public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
              initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                timeoutMs, client, locatedBlock.getBlockToken(), promise);
            } else {
              promise.tryFailure(future.cause());
            }
          }
        });
  }
  return futureList;
}
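
A hedged usage sketch for the method above: each returned future completes once the corresponding datanode channel has been initialized, so a caller might collect the channels as below (awaitAll and its surroundings are hypothetical, not part of the HBase code):

import java.util.ArrayList;
import java.util.List;
import io.netty.channel.Channel;
import io.netty.util.concurrent.Future;

class ConnectUsageSketch {
  // futures: as returned by connectToDataNodes(...) above.
  static List<Channel> awaitAll(List<Future<Channel>> futures) {
    List<Channel> channels = new ArrayList<>(futures.size());
    for (Future<Channel> f : futures) {
      // Netty future: block until completion (rethrows on failure), then
      // read the now-available result.
      channels.add(f.syncUninterruptibly().getNow());
    }
    return channels;
  }
}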