Java 类org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto 实例源码

项目:hbase    文件:FanOutOneBlockAsyncDFSOutputHelper.java   
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    Enum<?> storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
  // Kick off SASL negotiation on this freshly connected channel; continue with
  // the write-block exchange only after negotiation completes successfully.
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (!future.isSuccess()) {
        // Propagate the negotiation failure to the caller's promise.
        promise.tryFailure(future.cause());
        return;
      }
      // Install the response handler before sending the request so no reply
      // can arrive while the pipeline is still unconfigured.
      processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
      requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
    }
  });
}
项目:hadoop-TCP    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Parse the vint-length-prefixed request proto, then unpack it for writeBlock().
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  // The caching strategy is optional on the wire; substitute the default when absent.
  final CachingStrategy cachingStrategy = proto.hasCachingStrategy()
      ? getCachingStrategy(proto.getCachingStrategy())
      : CachingStrategy.newDefaultStrategy();
  writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()),
      cachingStrategy);
}
项目:hardfs    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Decode the length-prefixed OpWriteBlockProto and dispatch to writeBlock().
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final CachingStrategy strategy;
  if (proto.hasCachingStrategy()) {
    strategy = getCachingStrategy(proto.getCachingStrategy());
  } else {
    // No caching strategy supplied by the client: fall back to the default.
    strategy = CachingStrategy.newDefaultStrategy();
  }
  writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()),
      strategy);
}
项目:hadoop-on-lustre2    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Read the varint-length-prefixed request and forward all fields to writeBlock().
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  // Default the caching strategy when the optional field is missing.
  final CachingStrategy cachingStrategy = proto.hasCachingStrategy()
      ? getCachingStrategy(proto.getCachingStrategy())
      : CachingStrategy.newDefaultStrategy();
  writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()),
      cachingStrategy);
}
项目:hadoop    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Parse the vint-length-prefixed request proto.
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  // Resume any trace span the client propagated in the request header.
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    // Optional wire fields fall back to defaults when absent.
    final CachingStrategy cachingStrategy = proto.hasCachingStrategy()
        ? getCachingStrategy(proto.getCachingStrategy())
        : CachingStrategy.newDefaultStrategy();
    final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
    final boolean pinning = proto.hasPinning() && proto.getPinning();
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist,
        pinning,
        PBHelper.convertBooleanList(proto.getTargetPinningsList()));
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Decode the length-prefixed request proto off the stream.
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
  // Continue the client's trace span, if one was attached to the header.
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    // Missing optional fields are replaced by their defaults.
    final CachingStrategy cachingStrategy = proto.hasCachingStrategy()
        ? getCachingStrategy(proto.getCachingStrategy())
        : CachingStrategy.newDefaultStrategy();
    final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
    final boolean pinning = proto.hasPinning() && proto.getPinning();
    writeBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelperClient.convertStorageType(proto.getStorageType()),
        PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelperClient.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist,
        pinning,
        PBHelperClient.convertBooleanList(proto.getTargetPinningsList()));
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
项目:big-c    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  // Pick up the client's trace span if the header carried one.
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    // Optional proto fields default when not present on the wire.
    final CachingStrategy cachingStrategy = proto.hasCachingStrategy()
        ? getCachingStrategy(proto.getCachingStrategy())
        : CachingStrategy.newDefaultStrategy();
    final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
    final boolean pinning = proto.hasPinning() && proto.getPinning();
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist,
        pinning,
        PBHelper.convertBooleanList(proto.getTargetPinningsList()));
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  // Resume the client's trace span, if one was propagated in the header.
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    // Optional wire fields fall back to defaults when absent.
    final CachingStrategy cachingStrategy = proto.hasCachingStrategy()
        ? getCachingStrategy(proto.getCachingStrategy())
        : CachingStrategy.newDefaultStrategy();
    final boolean allowLazyPersist = proto.hasAllowLazyPersist() && proto.getAllowLazyPersist();
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(),
        proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        cachingStrategy,
        allowLazyPersist);
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
项目:hadoop-plus    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum) throws IOException {
  // Assemble the OP_WRITE_BLOCK request proto and push it onto the wire.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  // Offset 1: presumably skips the first entry of the target list --
  // TODO confirm against PBHelper.convert(DatanodeInfo[], int).
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);

  // The source field is optional; only set it when a source datanode is given.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hadoop-plus    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Parse the vint-length-prefixed request proto and unpack it for writeBlock().
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  writeBlock(
      PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()));
}
项目:FlexMap    文件:Receiver.java   
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
          CachingStrategy.newDefaultStrategy()),
          (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false));
   } finally {
    if (traceScope != null) traceScope.close();
   }
}
项目:hops    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken, final String clientName,
    final DatanodeInfo[] targets, final DatanodeInfo source,
    final BlockConstructionStage stage, final int pipelineSize,
    final long minBytesRcvd, final long maxBytesRcvd,
    final long latestGenerationStamp, DataChecksum requestedChecksum)
    throws IOException {
  // Build the OP_WRITE_BLOCK request proto and send it to the datanode.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  // Offset 1: presumably excludes the first target from the downstream list --
  // TODO confirm against PBHelper.convert(DatanodeInfo[], int).
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);

  // source is an optional proto field; set it only when provided.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hops    文件:Receiver.java   
/**
 * Receive OP_WRITE_BLOCK
 */
private void opWriteBlock(DataInputStream in) throws IOException {
  // Parse the vint-length-prefixed request proto and hand every field to writeBlock().
  final OpWriteBlockProto proto =
      OpWriteBlockProto.parseFrom(vintPrefixed(in));
  writeBlock(
      PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()),
      PBHelper.convert(proto.getSource()),
      fromProto(proto.getStage()),
      proto.getPipelineSize(),
      proto.getMinBytesRcvd(),
      proto.getMaxBytesRcvd(),
      proto.getLatestGenerationStamp(),
      fromProto(proto.getRequestedChecksum()));
}
项目:hbase    文件:FanOutOneBlockAsyncDFSOutputHelper.java   
/**
 * Serializes the write-block request and sends it over the channel.
 * Frame layout: 2-byte transfer version, 1-byte opcode, then the
 * varint-delimited OpWriteBlockProto (hence the 3 + varint + proto sizing).
 *
 * Fix: release the allocated ByteBuf if serialization fails before
 * writeAndFlush() takes ownership, otherwise the buffer leaks.
 */
private static void requestWriteBlock(Channel channel, Enum<?> storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
  // Fill in the per-datanode storage type on the shared builder, then build.
  OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
  int protoLen = proto.getSerializedSize();
  ByteBuf buffer =
      channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
  try {
    buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    buffer.writeByte(Op.WRITE_BLOCK.code);
    proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
    // Ownership of the buffer transfers to netty here; it will release it.
    channel.writeAndFlush(buffer);
  } catch (IOException | RuntimeException e) {
    // Serialization failed before the flush: we still own the buffer.
    buffer.release();
    throw e;
  }
}
项目:hadoop-TCP    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  // Assemble the OP_WRITE_BLOCK request proto and send it downstream.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  // Offset 1: presumably skips the first target in the pipeline list --
  // TODO confirm against PBHelper.convert(DatanodeInfo[], int).
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));

  // The source field is optional; populate it only when present.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hardfs    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  // Build and transmit the OP_WRITE_BLOCK request.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  // Offset 1: presumably excludes the first pipeline target --
  // TODO confirm against PBHelper.convert(DatanodeInfo[], int).
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));

  // source is optional on the wire; set it only when supplied.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hadoop-on-lustre2    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy) throws IOException {
  // Construct the OP_WRITE_BLOCK request proto, then send it.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  // Offset 1: presumably skips the first entry of the target list --
  // TODO confirm against PBHelper.convert(DatanodeInfo[], int).
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));

  // Only set the optional source field when a source node is provided.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hadoop    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  // Assemble the full OP_WRITE_BLOCK request proto and send it.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  proto.setStorageType(PBHelper.convertStorageType(storageType));
  // Offset 1: presumably skips the first entry of each per-target list --
  // TODO confirm against the PBHelper.convert(..., int) overloads.
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));
  proto.setAllowLazyPersist(allowLazyPersist);
  proto.setPinning(pinning);
  proto.addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));

  // The source field is optional; set it only when present.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hadoop    文件:DataTransferProtoUtil.java   
/** Translates the wire (protobuf) stage enum into the native enum. */
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  // Both enums declare identical constant names, so map by name.
  final String stageName = stage.name();
  return BlockConstructionStage.valueOf(stageName);
}
项目:hadoop    文件:DataTransferProtoUtil.java   
/** Translates the native stage enum into its wire (protobuf) counterpart. */
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  // The enums mirror each other constant-for-constant; look up by name.
  final String stageName = stage.name();
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stageName);
}
项目:aliyun-oss-hadoop-fs    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  // Build the OP_WRITE_BLOCK request proto and push it onto the wire.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  proto.setStorageType(PBHelperClient.convertStorageType(storageType));
  // Offset 1: presumably skips the first entry of each per-target list --
  // TODO confirm against the PBHelperClient.convert(..., int) overloads.
  proto.addAllTargets(PBHelperClient.convert(targets, 1));
  proto.addAllTargetStorageTypes(
      PBHelperClient.convertStorageTypes(targetStorageTypes, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));
  proto.setAllowLazyPersist(allowLazyPersist);
  proto.setPinning(pinning);
  proto.addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1));

  // source is optional on the wire; set only when supplied.
  if (source != null) {
    proto.setSource(PBHelperClient.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:aliyun-oss-hadoop-fs    文件:DataTransferProtoUtil.java   
// Maps the protobuf stage enum to the native BlockConstructionStage by
// constant name; valueOf throws IllegalArgumentException if the enums diverge.
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
项目:aliyun-oss-hadoop-fs    文件:DataTransferProtoUtil.java   
// Maps the native BlockConstructionStage to the protobuf stage enum by
// constant name; valueOf throws IllegalArgumentException if the enums diverge.
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
项目:big-c    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist,
    final boolean pinning,
    final boolean[] targetPinnings) throws IOException {
  // Assemble the OP_WRITE_BLOCK request proto and transmit it.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  proto.setStorageType(PBHelper.convertStorageType(storageType));
  // Offset 1: presumably skips the first entry of each per-target list --
  // TODO confirm against the PBHelper.convert(..., int) overloads.
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));
  proto.setAllowLazyPersist(allowLazyPersist);
  proto.setPinning(pinning);
  proto.addAllTargetPinnings(PBHelper.convert(targetPinnings, 1));

  // The source field is optional; set it only when present.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:big-c    文件:DataTransferProtoUtil.java   
// Converts the wire (protobuf) stage enum to the native enum. Relies on both
// enums declaring identical constant names.
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
项目:big-c    文件:DataTransferProtoUtil.java   
// Converts the native stage enum to its wire (protobuf) counterpart. Relies on
// both enums declaring identical constant names.
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist) throws IOException {
  // Build the OP_WRITE_BLOCK request proto and send it downstream.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  proto.setStorageType(PBHelper.convertStorageType(storageType));
  // Offset 1: presumably skips the first entry of each per-target list --
  // TODO confirm against the PBHelper.convert(..., int) overloads.
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));
  proto.setAllowLazyPersist(allowLazyPersist);

  // source is an optional proto field; only set it when provided.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DataTransferProtoUtil.java   
/** Converts the wire (protobuf) stage enum to the native enum. */
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  // The two enums share constant names, so a name lookup is exact.
  final String stageName = stage.name();
  return BlockConstructionStage.valueOf(stageName);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DataTransferProtoUtil.java   
/** Converts the native stage enum to its wire (protobuf) counterpart. */
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  // The two enums share constant names, so a name lookup is exact.
  final String stageName = stage.name();
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stageName);
}
项目:hadoop-plus    文件:DataTransferProtoUtil.java   
// Maps the protobuf stage enum to the native BlockConstructionStage by
// constant name; valueOf throws IllegalArgumentException on a mismatch.
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
项目:hadoop-plus    文件:DataTransferProtoUtil.java   
// Maps the native BlockConstructionStage to the protobuf stage enum by
// constant name; valueOf throws IllegalArgumentException on a mismatch.
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
项目:FlexMap    文件:Sender.java   
@Override
public void writeBlock(final ExtendedBlock blk,
    final StorageType storageType,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes,
    final DatanodeInfo source,
    final BlockConstructionStage stage,
    final int pipelineSize,
    final long minBytesRcvd,
    final long maxBytesRcvd,
    final long latestGenerationStamp,
    DataChecksum requestedChecksum,
    final CachingStrategy cachingStrategy,
    final boolean allowLazyPersist) throws IOException {
  // Construct the OP_WRITE_BLOCK request proto, then send it.
  final ClientOperationHeaderProto header =
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken);
  final ChecksumProto checksumProto =
      DataTransferProtoUtil.toProto(requestedChecksum);

  final OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder();
  proto.setHeader(header);
  proto.setStorageType(PBHelper.convertStorageType(storageType));
  // Offset 1: presumably skips the first entry of each per-target list --
  // TODO confirm against the PBHelper.convert(..., int) overloads.
  proto.addAllTargets(PBHelper.convert(targets, 1));
  proto.addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1));
  proto.setStage(toProto(stage));
  proto.setPipelineSize(pipelineSize);
  proto.setMinBytesRcvd(minBytesRcvd);
  proto.setMaxBytesRcvd(maxBytesRcvd);
  proto.setLatestGenerationStamp(latestGenerationStamp);
  proto.setRequestedChecksum(checksumProto);
  proto.setCachingStrategy(getCachingStrategy(cachingStrategy));
  proto.setAllowLazyPersist(allowLazyPersist);

  // Only set the optional source field when a source datanode is given.
  if (source != null) {
    proto.setSource(PBHelper.convertDatanodeInfo(source));
  }

  send(out, Op.WRITE_BLOCK, proto.build());
}
项目:FlexMap    文件:DataTransferProtoUtil.java   
/** Translates the wire (protobuf) stage enum into the native enum. */
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  // Constant names are shared between the two enums.
  final String stageName = stage.name();
  return BlockConstructionStage.valueOf(stageName);
}
项目:FlexMap    文件:DataTransferProtoUtil.java   
/** Translates the native stage enum into its wire (protobuf) counterpart. */
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  // Constant names are shared between the two enums.
  final String stageName = stage.name();
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stageName);
}
项目:hops    文件:DataTransferProtoUtil.java   
// Converts the protobuf stage enum to the native enum by shared constant name;
// valueOf throws IllegalArgumentException if the enums diverge.
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}
项目:hops    文件:DataTransferProtoUtil.java   
// Converts the native stage enum to the protobuf enum by shared constant name;
// valueOf throws IllegalArgumentException if the enums diverge.
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name());
}
项目:hbase    文件:FanOutOneBlockAsyncDFSOutputHelper.java   
// Opens one netty connection per datanode of the located block and starts the
// OP_WRITE_BLOCK handshake on each; returns one future per datanode that
// completes when that channel is ready for writing (or fails).
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
    String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
    BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass) {
  Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
  DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
  boolean connectToDnViaHostname =
      conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
  // Copy the block before adjusting its length so the caller's LocatedBlock
  // is not mutated.
  ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
  blockCopy.setNumBytes(locatedBlock.getBlockSize());
  ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
      .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
          .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
      .setClientName(clientName).build();
  ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
  // Shared request template; the per-datanode storage type is filled in later
  // by requestWriteBlock via STORAGE_TYPE_SETTER.
  OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
      .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
      .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
      .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
  List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
  for (int i = 0; i < datanodeInfos.length; i++) {
    DatanodeInfo dnInfo = datanodeInfos[i];
    Enum<?> storageType = storageTypes[i];
    // One promise per datanode; completed (or failed) by initialize().
    Promise<Channel> promise = eventLoopGroup.next().newPromise();
    futureList.add(promise);
    String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
    new Bootstrap().group(eventLoopGroup).channel(channelClass)
        .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

          @Override
          protected void initChannel(Channel ch) throws Exception {
            // we need to get the remote address of the channel so we can only move on after
            // channel connected. Leave an empty implementation here because netty does not allow
            // a null handler.
          }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

          @Override
          public void operationComplete(ChannelFuture future) throws Exception {
            // Only start SASL + write-block setup once the TCP connect succeeded.
            if (future.isSuccess()) {
              initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                timeoutMs, client, locatedBlock.getBlockToken(), promise);
            } else {
              promise.tryFailure(future.cause());
            }
          }
        });
  }
  return futureList;
}
项目:hadoop-TCP    文件:DataTransferProtoUtil.java   
/** Converts the wire (protobuf) stage enum to the native enum. */
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  // Map by name: the two enums declare identical constants.
  final String stageName = stage.name();
  return BlockConstructionStage.valueOf(stageName);
}
项目:hadoop-TCP    文件:DataTransferProtoUtil.java   
/** Converts the native stage enum to its wire (protobuf) counterpart. */
static OpWriteBlockProto.BlockConstructionStage toProto(
    BlockConstructionStage stage) {
  // Map by name: the two enums declare identical constants.
  final String stageName = stage.name();
  return OpWriteBlockProto.BlockConstructionStage.valueOf(stageName);
}
项目:hardfs    文件:DataTransferProtoUtil.java   
// Maps the protobuf stage enum to the native BlockConstructionStage by
// constant name; valueOf throws IllegalArgumentException if the names diverge.
static BlockConstructionStage fromProto(
    OpWriteBlockProto.BlockConstructionStage stage) {
  return BlockConstructionStage.valueOf(stage.name());
}