Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status example source code

Project: hadoop    File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
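
A minimal caller-side sketch, not taken from any of the listed projects: read the DataNode's varint-delimited response (the same parseDelimitedFrom framing the replaceBlock example further down this page uses) and let checkBlockOpStatus map a non-SUCCESS status to the right exception. The stream parameter and the log context string are assumptions.

// Hypothetical caller: "in" is the response side of a DataNode connection;
// the opcode named in the log context is illustrative only.
static void readBlockOpReply(InputStream in, ExtendedBlock blk)
    throws IOException {
  BlockOpResponseProto response = BlockOpResponseProto.parseDelimitedFrom(in);
  DataTransferProtoUtil.checkBlockOpStatus(response,
      "for OP_WRITE_BLOCK, block " + blk);
}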
Project: hadoop    File: PipelineAck.java
/**
 * Constructor
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies,
                   long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
    .setSeqno(seqno)
    .addAllReply(statusList)
    .addAllFlag(flagList)
    .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
    .build();
}
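
A hedged sketch of producing the int[] replies this constructor expects: PipelineAck.combineHeader, used the same way in the test snippets further down this page, packs the ECN header bits and the Status into a single int. The helper method itself is hypothetical.

// Hypothetical helper: a one-reply SUCCESS ack with no downstream DataNode.
static PipelineAck singleSuccessAck(long seqno) {
  int reply = PipelineAck.combineHeader(PipelineAck.ECN.DISABLED,
      Status.SUCCESS);
  return new PipelineAck(seqno, new int[] { reply },
      0L /* no next DN in pipeline */);
}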
Project: hadoop    File: PipelineAck.java
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
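
A small receiver-side sketch, an assumption rather than code from the listed projects: OOB acks carry UNKOWN_SEQNO, so getOOBStatus() is non-null only for them, and Status.OOB_RESTART is the out-of-band value a restarting DataNode sends.

// Hypothetical check: true when this ack is a DataNode-restart OOB ack.
static boolean isRestartOOB(PipelineAck ack) {
  return ack.getOOBStatus() == Status.OOB_RESTART;
}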
Project: hadoop    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: hadoop    File: TestClientBlockVerification.java
/**
 * Test various unaligned reads to make sure that we verify checksums
 * correctly even when we don't start or end on a checksum boundary.
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Project: hadoop    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] { PipelineAck.combineHeader(
      PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
  sendRecvData(description, false);
}
Project: hadoop    File: TestDataTransferProtocol.java
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
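
The writeBlock call above is a test-local wrapper; the hadoop-plus and hops variants of testWrite near the end of this page show the underlying sender.writeBlock invocation (with BlockTokenSecretManager.DUMMY_TOKEN and a dummy target array) that it abbreviates.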
Project: aliyun-oss-hadoop-fs    File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status=" + response.getStatus().name()
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: PipelineAck.java
/**
 * Constructor
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies,
                   long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
    .setSeqno(seqno)
    .addAllReply(statusList)
    .addAllFlag(flagList)
    .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
    .build();
}
Project: aliyun-oss-hadoop-fs    File: PipelineAck.java
/**
 * Returns the OOB status if this ack contains one.
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
Project: aliyun-oss-hadoop-fs    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenIdentifier.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestClientBlockVerification.java
/**
 * Test various unaligned reads to make sure that we verify checksums
 * correctly even when we don't start or end on a checksum boundary.
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] { PipelineAck.combineHeader(
      PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
  sendRecvData(description, false);
}
Project: aliyun-oss-hadoop-fs    File: TestDataTransferProtocol.java
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
Project: big-c    File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
Project: big-c    File: PipelineAck.java
/**
 * Constructor
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies,
                   long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
    .setSeqno(seqno)
    .addAllReply(statusList)
    .addAllFlag(flagList)
    .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
    .build();
}
Project: big-c    File: PipelineAck.java
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
Project: big-c    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: big-c    File: TestClientBlockVerification.java
/**
 * Test various unaligned reads to make sure that we verify checksums
 * correctly even when we don't start or end on a checksum boundary.
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Project: big-c    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] { PipelineAck.combineHeader(
      PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
  sendRecvData(description, false);
}
Project: big-c    File: TestDataTransferProtocol.java
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
Project: hops    File: TestClientBlockVerification.java
/**
 * Test various unaligned reads to make sure that we verify checksums
 * correctly even when we don't start or end on a checksum boundary.
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[]{0, 3, 129};
  int lengths[] = new int[]{30, 300, 512, 513, 1025};
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
          " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2) spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
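
On branches that ship DataTransferProtoUtil.checkBlockOpStatus (the hadoop and aliyun-oss-hadoop-fs snippets at the top of this page), the same check can be expressed through the shared helper; a sketch, with the message detail trimmed:

// Equivalent call via the shared helper (sketch only):
DataTransferProtoUtil.checkBlockOpStatus(status,
    "for OP_READ_BLOCK, self=" + peer.getLocalAddressString()
        + ", remote=" + peer.getRemoteAddressString()
        + ", for file " + file);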
Project: hadoop-2.6.0-cdh5.4.3    File: PipelineAck.java
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status reply : proto.getStatusList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (reply.getNumber() >= OOB_START && reply.getNumber() <= OOB_END) {
      return reply;
    }
  }
  return null;
}
Project: hops    File: TestBlockReplacement.java
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
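
A hedged usage sketch: the enclosing test, which this page does not show, would assert on the boolean result. The JUnit assertion below is an assumption.

// Hypothetical call site inside the enclosing test:
assertTrue("replaceBlock should report SUCCESS",
    replaceBlock(block, source, sourceProxy, destination));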
Project: hadoop-2.6.0-cdh5.4.3    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestClientBlockVerification.java
/**
 * Test various unaligned reads to make sure that we verify checksums
 * correctly even when we don't start or end on a checksum boundary.
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
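
Note the branch difference visible on this page: this snippet, like the hadoop-plus and hops ones below, builds the ack directly from a Status[], while the hadoop, aliyun-oss-hadoop-fs, and big-c snippets above pack each reply's ECN bits and Status into a single int via PipelineAck.combineHeader.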
Project: hops    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken, final String clientName,
    final DatanodeInfo[] targets) throws IOException {
  checkAccess(null, true, blk, blockToken, Op.TRANSFER_BLOCK,
      BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } finally {
    IOUtils.closeStream(out);
  }
}
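
Older branches such as hops and hadoop-plus use the transferBlock overload without the StorageType[] targetStorageTypes parameter (and without the catch block that counts DataNode network errors); otherwise the flow matches the variants above.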
Project: hadoop-plus    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hadoop-plus    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: hadoop-plus    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hops    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(8,                   // size of packet
      block.getNumBytes(), // OffsetInBlock
      100,                 // sequencenumber
      true,                // lastPacketInBlock
      0,                   // chunk length
      false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hadoop-plus    File: TestDataTransferProtocol.java
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], null, stage,
      0, block.getNumBytes(), block.getNumBytes(), newGS,
      DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
Project: hadoop-plus    File: TestBlockReplacement.java
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto =
    BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Project: FlexMap    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: FlexMap    File: PipelineAck.java
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status reply : proto.getStatusList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (reply.getNumber() >= OOB_START && reply.getNumber() <= OOB_END) {
      return reply;
    }
  }
  return null;
}
Project: FlexMap    File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: hops    File: TestDataTransferProtocol.java
private void testWrite(ExtendedBlock block, BlockConstructionStage stage,
    long newGS, String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], null, stage, 0, block.getNumBytes(),
      block.getNumBytes(), newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
Project: FlexMap    File: TestClientBlockVerification.java
/**
 * Test various unaligned reads to make sure that we verify checksums
 * correctly even when we don't start or end on a checksum boundary.
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}