Source code examples for the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

Project: hadoop    File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
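In the client code later on this page, this helper is invoked right after a response has been parsed from the datanode stream. A minimal sketch of that calling pattern follows; "in", "block" and "dn" are placeholder variables, not part of the original snippet.

// Sketch only: parse a vint-prefixed response and let checkBlockOpStatus
// turn a non-SUCCESS status into the appropriate exception.
BlockOpResponseProto reply =
    BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
DataTransferProtoUtil.checkBlockOpStatus(reply,
    "trying to read " + block + " from datanode " + dn);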
Project: hadoop    File: DFSClient.java
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: aliyun-oss-hadoop-fs    File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status=" + response.getStatus().name()
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: DFSClient.java
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(), lb);

  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, smallBufferSize));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " +
        dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelperClient.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtilsClient.cleanup(null, pair.in, pair.out);
  }
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DataStreamer.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: big-c    File: DataTransferProtoUtil.java
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
Project: big-c    File: DFSClient.java
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: big-c    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hadoop-2.6.0-cdh5.4.3    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hadoop-plus    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hadoop-plus    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hadoop-plus    File: TestBlockReplacement.java
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto =
    BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Project: FlexMap    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: FlexMap    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: FlexMap    File: TestBlockReplacement.java
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN,
      source.getDatanodeUuid(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  while (proto.getStatus() == Status.IN_PROGRESS) {
    proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  }
  return proto.getStatus() == Status.SUCCESS;
}
Project: hops    File: RemoteBlockReader2.java
static void checkSuccess(BlockOpResponseProto status, Socket sock,
    ExtendedBlock block, String file) throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self=" +
              sock.getLocalSocketAddress() + ", remote=" +
              sock.getRemoteSocketAddress() + ", for file " + file +
              ", for pool " + block.getBlockPoolId() + " block " +
              block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self=" +
          sock.getLocalSocketAddress() + ", remote=" +
          sock.getRemoteSocketAddress() + ", for file " + file +
          ", for pool " + block.getBlockPoolId() + " block " +
          block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hops    File: DFSTestUtil.java
/**
 * For {@link TestTransferRbw}
 */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes)
    throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream
      .createSocketForPipeline(datanodes[0], datanodes.length, dfsClient);
  final long writeTimeout =
      dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout),
          HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hops    File: TestBlockReplacement.java
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Project: hadoop-TCP    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hadoop-TCP    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hadoop-TCP    File: TestBlockReplacement.java
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto =
    BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Project: hardfs    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hardfs    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hardfs    File: TestBlockReplacement.java
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto =
    BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Project: hadoop-on-lustre2    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Project: hadoop-on-lustre2    File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
Project: hadoop-on-lustre2    File: TestBlockReplacement.java
private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getDatanodeUuid(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto =
    BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
Project: hadoop    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
    + ", self=" + peer.getLocalAddressString()
    + ", remote=" + peer.getRemoteAddressString()
    + ", for file " + file
    + ", for pool " + block.getBlockPoolId()
    + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
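This variant builds the same diagnostic string as the older RemoteBlockReader2.checkSuccess copies above, but delegates the status branching (SUCCESS, ERROR_ACCESS_TOKEN, other errors) to DataTransferProtoUtil.checkBlockOpStatus instead of hand-rolling the two exception paths.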
Project: hadoop    File: Dispatcher.java
/** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "block move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
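Note that the loop discards any intermediate IN_PROGRESS responses the datanode may emit while the block move is still running; only the final status is handed to checkBlockOpStatus.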
Project: hadoop    File: DataXceiver.java
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
  BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
    .setStatus(status);
  if (message != null) {
    response.setMessage(message);
  }
  response.build().writeDelimitedTo(out);
  out.flush();
}
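The delimited encoding written here is read back on the other side with BlockOpResponseProto.parseDelimitedFrom, as in the transferRbw helpers above. A minimal sketch of that matching read, with "in" and the log text as placeholders, is:

// Sketch only: client-side read of a response written with writeDelimitedTo.
BlockOpResponseProto resp = BlockOpResponseProto.parseDelimitedFrom(in);
DataTransferProtoUtil.checkBlockOpStatus(resp, "placeholder operation context");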
Project: hadoop    File: DataXceiver.java
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {

  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
    .setChunkOffset(blockSender.getOffset())
    .build();

  BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
    .setStatus(SUCCESS)
    .setReadOpChecksumInfo(ckInfo)
    .build();
  response.writeDelimitedTo(out);
  out.flush();
}
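The client-side counterpart of this method appears in inferChecksumTypeByReading above, which reads the reply's getReadOpChecksumInfo().getChecksum().getType() to recover the checksum type written here.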
Project: hadoop    File: DataXceiver.java
private void checkAccess(OutputStream out, final boolean reply, 
    final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> t,
    final Op op,
    final BlockTokenSecretManager.AccessMode mode) throws IOException {
  if (datanode.isBlockTokenEnabled) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Checking block access token for block '" + blk.getBlockId()
          + "' with mode '" + mode + "'");
    }
    try {
      datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
    } catch(InvalidToken e) {
      try {
        if (reply) {
          BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
            .setStatus(ERROR_ACCESS_TOKEN);
          if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
            DatanodeRegistration dnR = 
              datanode.getDNRegistrationForBP(blk.getBlockPoolId());
            // NB: Unconditionally using the xfer addr w/o hostname
            resp.setFirstBadLink(dnR.getXferAddr());
          }
          resp.build().writeDelimitedTo(out);
          out.flush();
        }
        LOG.warn("Block token verification failed: op=" + op
            + ", remoteAddress=" + remoteAddress
            + ", message=" + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
}
Project: hadoop    File: TestDataTransferProtocol.java
private void sendResponse(Status status, String firstBadLink,
    String message,
    DataOutputStream out)
throws IOException {
  Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
  if (firstBadLink != null) {
    builder.setFirstBadLink(firstBadLink);
  }
  if (message != null) {
    builder.setMessage(message);
  }
  builder.build()
    .writeDelimitedTo(out);
}
Project: hadoop    File: TestBlockReplacement.java
private boolean replaceBlock(
    ExtendedBlock block,
    DatanodeInfo source,
    DatanodeInfo sourceProxy,
    DatanodeInfo destination,
    StorageType targetStorageType) throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());

    BlockOpResponseProto proto =
        BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == Status.SUCCESS;
  } finally {
    sock.close();
  }
}
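Compared with the earlier replaceBlock variants on this page, this one closes the socket in a finally block and skips intermediate IN_PROGRESS responses before checking for SUCCESS.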
Project: aliyun-oss-hadoop-fs    File: RemoteBlockReader2.java
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
      + ", self=" + peer.getLocalAddressString()
      + ", remote=" + peer.getRemoteAddressString()
      + ", for file " + file
      + ", for pool " + block.getBlockPoolId()
      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
Project: aliyun-oss-hadoop-fs    File: Dispatcher.java
/** Receive a reportedBlock copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "reportedBlock move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
Project: aliyun-oss-hadoop-fs    File: DataXceiver.java
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
  BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
    .setStatus(status);
  if (message != null) {
    response.setMessage(message);
  }
  response.build().writeDelimitedTo(out);
  out.flush();
}
Project: aliyun-oss-hadoop-fs    File: DataXceiver.java
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {

  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
    .setChunkOffset(blockSender.getOffset())
    .build();

  BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
    .setStatus(SUCCESS)
    .setReadOpChecksumInfo(ckInfo)
    .build();
  response.writeDelimitedTo(out);
  out.flush();
}