Example source code for the Java class org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader

Project: hadoop    File: DFSOutputStream.java
/** Use {@link ByteArrayManager} to create buffers for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
                       getChecksumSize(), lastPacketInBlock);
}
Project: hadoop    File: DFSPacket.java
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of a checksum
 * @param lastPacketInBlock whether this is the last packet in the block
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
          int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
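
The constructor reserves PKT_MAX_HEADER_LEN bytes at the front of buf and places all checksums before all data, so checksumPos and dataPos advance independently as chunks are written. A minimal, self-contained sketch of that layout arithmetic follows; the hard-coded 33 for PKT_MAX_HEADER_LEN and the chunk counts are assumptions for illustration, the real constant lives on PacketHeader:

// Sketch of the buffer layout the constructor above produces.
// PKT_MAX_HEADER_LEN is hard-coded purely for illustration.
public class PacketLayoutSketch {
  static final int PKT_MAX_HEADER_LEN = 33; // assumed value, see lead-in

  public static void main(String[] args) {
    int chunksPerPkt = 126; // e.g. a 64 KB packet of 512-byte chunks
    int checksumSize = 4;   // e.g. CRC32C
    int chunkSize = 512;

    int checksumStart = PKT_MAX_HEADER_LEN;
    int dataStart = checksumStart + chunksPerPkt * checksumSize;
    int bufLen = dataStart + chunksPerPkt * chunkSize;

    System.out.println("header region:   [0, " + checksumStart + ")");
    System.out.println("checksum region: [" + checksumStart + ", " + dataStart + ")");
    System.out.println("data region:     [" + dataStart + ", " + bufLen + ")");
  }
}
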
Project: hadoop    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
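
The six positional arguments map to packetLen, offsetInBlock, seqno, lastPacketInBlock, dataLen and syncBlock; the 8-byte packet length is the 4-byte length word plus the single 4-byte zero checksum the test writes. A hedged sketch of serializing such a header on its own, using only the PacketHeader API already visible in these snippets (constructor, write(), PKT_MAX_HEADER_LEN):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

// Serialize the same zero-length, last-packet header the test sends.
public class HeaderWriteSketch {
  public static void main(String[] args) throws Exception {
    PacketHeader hdr = new PacketHeader(
        8,      // packet length: 4-byte length word + 4-byte zero checksum
        0L,     // offset in block
        100L,   // sequence number
        true,   // last packet in block
        0,      // data length
        false); // sync block

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    hdr.write(new DataOutputStream(bytes));
    System.out.println("wrote " + bytes.size() + " header bytes; max is "
        + PacketHeader.PKT_MAX_HEADER_LEN);
  }
}
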
Project: hadoop    File: TestDFSPacket.java
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  //we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
}
Project: aliyun-oss-hadoop-fs    File: DFSOutputStream.java
/** Use {@link ByteArrayManager} to create buffers for non-heartbeat packets. */
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
    long offsetInBlock, long seqno, boolean lastPacketInBlock)
    throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
      getChecksumSize(), lastPacketInBlock);
}
Project: aliyun-oss-hadoop-fs    File: DFSPacket.java
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of a checksum
 * @param lastPacketInBlock whether this is the last packet in the block
 */
public DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
                 int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: aliyun-oss-hadoop-fs    File: TestDFSPacket.java
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  //we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
}
Project: aliyun-oss-hadoop-fs    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
Project: big-c    File: DFSOutputStream.java
/** Use {@link ByteArrayManager} to create buffers for non-heartbeat packets. */
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
                       getChecksumSize(), lastPacketInBlock);
}
Project: big-c    File: DFSPacket.java
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of a checksum
 * @param lastPacketInBlock whether this is the last packet in the block
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
          int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: big-c    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
Project: big-c    File: TestDFSPacket.java
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  //we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSOutputStream.java
/** Use {@link ByteArrayManager} to create buffers for non-heartbeat packets. */
private Packet createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize());
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSOutputStream.java
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of a checksum
 */
private Packet(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hadoop-plus    File: DFSOutputStream.java
/**
 * Create a new packet.
 * 
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: hadoop-plus    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: FlexMap    File: DFSOutputStream.java
/** Use {@link ByteArrayManager} to create buffers for non-heartbeat packets. */
private Packet createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new Packet(buf, chunksPerPkt, offsetInBlock, seqno, getChecksumSize());
}
Project: FlexMap    File: DFSOutputStream.java
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet
 * @param offsetInBlock offset in bytes into the HDFS block
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of a checksum
 */
private Packet(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
    int checksumSize) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: FlexMap    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hops    File: DFSOutputStream.java
/**
 * Create a new packet.
 *
 * @param pktSize
 *     maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt
 *     maximum number of chunks per packet.
 * @param offsetInBlock
 *     offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: hops    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(8, // size of packet
      block.getNumBytes(), // offset in block
      100,                 // sequence number
      true,                // last packet in block
      0,                   // chunk length
      false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hbase    File: FanOutOneBlockAsyncDFSOutput.java
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof IdleStateEvent) {
    IdleStateEvent e = (IdleStateEvent) evt;
    if (e.state() == READER_IDLE) {
      failed(ctx.channel(),
        () -> new IOException("Timeout(" + timeoutMs + "ms) waiting for response"));
    } else if (e.state() == WRITER_IDLE) {
      PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);
      int len = heartbeat.getSerializedSize();
      ByteBuf buf = alloc.buffer(len);
      heartbeat.putInBuffer(buf.nioBuffer(0, len));
      buf.writerIndex(len);
      ctx.channel().writeAndFlush(buf);
    }
    return;
  }
  super.userEventTriggered(ctx, evt);
}
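
On WRITER_IDLE the handler keeps the pipeline alive with a heartbeat: packet length 4 (just the length word), the reserved heartbeat sequence number, zero data, no flags. Below is a sketch of the same header built on a plain NIO ByteBuffer instead of a Netty ByteBuf; the -1 value for HEART_BEAT_SEQNO is an assumption for illustration:

import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

// Heartbeat header mirroring the WRITER_IDLE branch above, without Netty.
public class HeartbeatSketch {
  public static void main(String[] args) {
    final long HEART_BEAT_SEQNO = -1L; // assumed reserved value
    PacketHeader heartbeat = new PacketHeader(4, 0, HEART_BEAT_SEQNO, false, 0, false);

    int len = heartbeat.getSerializedSize();
    ByteBuffer buf = ByteBuffer.allocate(len);
    heartbeat.putInBuffer(buf);
    buf.flip(); // ready to hand to a channel or socket write
    System.out.println("heartbeat header occupies " + len + " bytes");
  }
}
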
Project: hadoop-TCP    File: DFSOutputStream.java
/**
 * Create a new packet.
 * 
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: hadoop-TCP    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hardfs    File: DFSOutputStream.java
/**
 * Create a new packet.
 * 
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: hardfs    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hadoop-on-lustre2    File: DFSOutputStream.java
/**
 * Create a new packet.
 * 
 * @param pktSize maximum size of the packet, including checksum data and actual data.
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 */
Packet(int pktSize, int chunksPerPkt, long offsetInBlock) {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = currentSeqno;
  currentSeqno++;

  buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN + pktSize];

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksum.getChecksumSize());
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
Project: hadoop-on-lustre2    File: TestDataTransferProtocol.java
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  // OK, finally write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
Project: hadoop    File: DFSOutputStream.java
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
              ", chunkSize=" + chunkSize +
              ", chunksPerPacket=" + chunksPerPacket +
              ", packetSize=" + packetSize);
  }
}
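
Worked through with concrete numbers, assuming the common defaults of a 64 KB write packet (psize = 65536), 512-byte chunks with a 4-byte checksum, and PKT_MAX_HEADER_LEN taken as 33 for illustration:

// Worked example of the arithmetic in computePacketChunkSize above.
// All four inputs are assumptions chosen to match common defaults.
public class ChunkSizeSketch {
  public static void main(String[] args) {
    int psize = 65536;        // assumed dfs.client-write-packet-size
    int csize = 512;          // assumed bytes-per-checksum
    int checksumSize = 4;     // assumed CRC32C width
    int pktMaxHeaderLen = 33; // assumed PacketHeader.PKT_MAX_HEADER_LEN

    int bodySize = psize - pktMaxHeaderLen;                  // 65503
    int chunkSize = csize + checksumSize;                    // 516
    int chunksPerPacket = Math.max(bodySize / chunkSize, 1); // 126
    int packetSize = chunkSize * chunksPerPacket;            // 65016

    System.out.println(chunksPerPacket + " chunks per packet, packetSize=" + packetSize);
  }
}
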
Project: hadoop    File: RemoteBlockReader2.java
private void readTrailingEmptyPacket() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading empty packet at end of read");
  }

  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() ||
     trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
                          trailer);
  }
}
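
For this check to pass, the sender's trailer must carry lastPacketInBlock = true and dataLen = 0. A sketch of such a header, using only the accessors the snippet itself calls; the offset and sequence number are illustrative:

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

// A trailer that satisfies readTrailingEmptyPacket's check above.
public class TrailerSketch {
  public static void main(String[] args) {
    PacketHeader trailer = new PacketHeader(
        4,      // packet length: the 4-byte length word only
        1024L,  // offset in block (illustrative)
        7L,     // sequence number (illustrative)
        true,   // last packet in block
        0,      // data length
        false); // sync block
    boolean ok = trailer.isLastPacketInBlock() && trailer.getDataLen() == 0;
    System.out.println("passes the trailer check: " + ok);
  }
}
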
Project: hadoop    File: BlockSender.java
/**
 * Write the packet header into {@code pkt} and
 * return the length of the header written.
 */
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
  pkt.clear();
  // both syncBlock and syncPacket are false
  PacketHeader header = new PacketHeader(packetLen, offset, seqno,
      (dataLen == 0), dataLen, false);

  int size = header.getSerializedSize();
  pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
  header.putInBuffer(pkt);
  return size;
}
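
The positioning trick is worth spelling out: the packet buffer reserves PKT_MAX_HEADER_LEN bytes up front, but the serialized header can be shorter, so it is written right-aligned to end exactly where the checksum region begins. A minimal sketch of that alignment, with illustrative packet sizes:

import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

// Right-align a header inside the reserved PKT_MAX_HEADER_LEN prefix,
// mirroring writePacketHeader above. Sizes are illustrative.
public class HeaderAlignSketch {
  public static void main(String[] args) {
    int dataLen = 512;
    int packetLen = 4 + 4 + dataLen; // length word + one 4-byte checksum + data
    PacketHeader header = new PacketHeader(packetLen, 0L, 0L, false, dataLen, false);

    ByteBuffer pkt = ByteBuffer.allocate(PacketHeader.PKT_MAX_HEADER_LEN + packetLen);
    int size = header.getSerializedSize();
    pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size); // right-align the header
    header.putInBuffer(pkt);
    // position() is now exactly PKT_MAX_HEADER_LEN: checksums and data follow.
    System.out.println("header uses " + size + " of "
        + PacketHeader.PKT_MAX_HEADER_LEN + " reserved bytes");
  }
}
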
Project: aliyun-oss-hadoop-fs    File: DFSOutputStream.java
protected void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  DFSClient.LOG.debug("computePacketChunkSize: src={}, chunkSize={}, "
          + "chunksPerPacket={}, packetSize={}",
      src, chunkSize, chunksPerPacket, packetSize);
}
Project: aliyun-oss-hadoop-fs    File: RemoteBlockReader2.java
private void readTrailingEmptyPacket() throws IOException {
  LOG.trace("Reading empty packet at end of read");

  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() ||
      trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
        trailer);
  }
}
Project: aliyun-oss-hadoop-fs    File: BlockSender.java
/**
 * Write the packet header into {@code pkt} and
 * return the length of the header written.
 */
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
  pkt.clear();
  // both syncBlock and syncPacket are false
  PacketHeader header = new PacketHeader(packetLen, offset, seqno,
      (dataLen == 0), dataLen, false);

  int size = header.getSerializedSize();
  pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
  header.putInBuffer(pkt);
  return size;
}
Project: big-c    File: DFSOutputStream.java
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
              ", chunkSize=" + chunkSize +
              ", chunksPerPacket=" + chunksPerPacket +
              ", packetSize=" + packetSize);
  }
}
Project: big-c    File: RemoteBlockReader2.java
private void readTrailingEmptyPacket() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading empty packet at end of read");
  }

  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() ||
     trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
                          trailer);
  }
}
Project: big-c    File: BlockSender.java
/**
 * Write the packet header into {@code pkt} and
 * return the length of the header written.
 */
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
  pkt.clear();
  // both syncBlock and syncPacket are false
  PacketHeader header = new PacketHeader(packetLen, offset, seqno,
      (dataLen == 0), dataLen, false);

  int size = header.getSerializedSize();
  pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
  header.putInBuffer(pkt);
  return size;
}
Project: hadoop-2.6.0-cdh5.4.3    File: RemoteBlockReader2.java
private void readTrailingEmptyPacket() throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Reading empty packet at end of read");
  }

  packetReceiver.receiveNextPacket(in);

  PacketHeader trailer = packetReceiver.getHeader();
  if (!trailer.isLastPacketInBlock() ||
     trailer.getDataLen() != 0) {
    throw new IOException("Expected empty end-of-read packet! Header: " +
                          trailer);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockSender.java
/**
 * Write the packet header into {@code pkt} and
 * return the length of the header written.
 */
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
  pkt.clear();
  // both syncBlock and syncPacket are false
  PacketHeader header = new PacketHeader(packetLen, offset, seqno,
      (dataLen == 0), dataLen, false);

  int size = header.getSerializedSize();
  pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
  header.putInBuffer(pkt);
  return size;
}