Java 类org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock 实例源码

项目:hadoop    文件:DataNode.java   
/**
 * Kicks off asynchronous recovery of the given blocks on behalf of
 * {@code who} (used only for logging). Blocks are recovered one at a time
 * on a daemon thread; a failure on one block is logged and recovery moves
 * on to the next.
 *
 * @param who    name of the requester, for log messages
 * @param blocks the blocks to recover
 * @return the started daemon, so callers may join() it
 */
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {

  final Runnable task = new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock blk : blocks) {
        try {
          logRecoverBlock(who, blk);
          recoverBlock(blk);
        } catch (IOException ioe) {
          // One bad block must not stop recovery of the rest.
          LOG.warn("recoverBlocks FAILED: " + blk, ioe);
        }
      }
    }
  };
  final Daemon daemon = new Daemon(threadGroup, task);
  daemon.start();
  return daemon;
}
项目:hadoop    文件:InterDatanodeProtocolTranslatorPB.java   
/**
 * Client-side translator for initReplicaRecovery: marshals the block into
 * a protobuf request, performs the RPC, and unmarshals the reply.
 *
 * @param rBlock the block being recovered
 * @return recovery info for the remote replica, or null when the remote
 *         node has no replica of this block
 * @throws IOException if the RPC fails or the response is malformed
 */
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  final InitReplicaRecoveryRequestProto request =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock))
          .build();
  final InitReplicaRecoveryResponseProto response;
  try {
    response = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

  if (!response.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  }
  if (!response.hasBlock() || !response.hasState()) {
    throw new IOException("Replica was found but missing fields. " +
        "Req: " + request + "\n" +
        "Resp: " + response);
  }

  final BlockProto blockProto = response.getBlock();
  return new ReplicaRecoveryInfo(blockProto.getBlockId(),
      blockProto.getNumBytes(), blockProto.getGenStamp(),
      PBHelper.convert(response.getState()));
}
项目:hadoop    文件:InterDatanodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for initReplicaRecovery: unmarshals the request,
 * delegates to the implementation, and encodes the (possibly absent)
 * replica info in the response.
 */
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  final ReplicaRecoveryInfo info;
  try {
    info = impl.initReplicaRecovery(PBHelper.convert(request.getBlock()));
  } catch (IOException e) {
    throw new ServiceException(e);
  }

  final InitReplicaRecoveryResponseProto.Builder builder =
      InitReplicaRecoveryResponseProto.newBuilder();
  if (info == null) {
    // No replica on this node; block and state are intentionally unset.
    builder.setReplicaFound(false);
  } else {
    builder.setReplicaFound(true)
        .setBlock(PBHelper.convert(info))
        .setState(PBHelper.convert(info.getOriginalReplicaState()));
  }
  return builder.build();
}
项目:hadoop    文件:TestInterDatanodeProtocol.java   
/** Test to verify that InterDatanode RPC timesout as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  // Server that accepts connections but presumably never answers the RPC
  // -- TODO confirm TestServer(1, true) semantics against its definition.
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // Proxy with a 500 ms socket timeout; the call below should throw
    // SocketTimeoutException, satisfying @Test(expected=...).
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    // Reached only if no exception was thrown -- fail explicitly.
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    // Always release the proxy and stop the server, pass or fail.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
项目:hadoop    文件:TestBlockRecovery.java   
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  // NOTE(review): expectLen is not referenced in this body; presumably the
  // callers assert the resulting length themselves -- confirm.

  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block, 
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);

  // Stub both datanodes so updateReplicaUnderRecovery just reports a
  // storage ID instead of touching real replicas.
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
项目:hadoop    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every DN reports a FINALIZED replica of length 0 for this block.
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  // The NN must then be told to commit the block with length 0.
  // NOTE(review): the two boolean flags are presumably closeFile /
  // deleteBlock -- confirm against DatanodeProtocol.
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
项目:hadoop    文件:TestPBHelper.java   
@Test
public void testConvertBlockRecoveryCommand() {
  // Round-trip a BlockRecoveryCommand through its protobuf form and check
  // nothing is lost.
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };

  List<RecoveringBlock> blks = ImmutableList.of(
    new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
    new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
  );

  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  // Block ids must survive conversion, in order.
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());

  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);

  List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
      cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  // String forms compared as a cheap whole-object equality proxy.
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
项目:aliyun-oss-hadoop-fs    文件:BlockRecoveryWorker.java   
/**
 * Starts a daemon that recovers each of the given blocks, dispatching to
 * the striped or contiguous recovery task depending on the block type.
 * A failure on one block is logged and does not abort the remaining ones.
 *
 * @param who    requester name, used only for logging
 * @param blocks blocks to recover
 * @return the started daemon, so callers may join() it
 */
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  final Runnable task = new Runnable() {
    @Override
    public void run() {
      for (RecoveringBlock blk : blocks) {
        try {
          logRecoverBlock(who, blk);
          if (blk.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) blk).recover();
          } else {
            new RecoveryTaskContiguous(blk).recover();
          }
        } catch (IOException ioe) {
          // Keep going: one failed block must not stop the others.
          LOG.warn("recoverBlocks FAILED: " + blk, ioe);
        }
      }
    }
  };
  final Daemon daemon = new Daemon(datanode.threadGroup, task);
  daemon.start();
  return daemon;
}
项目:aliyun-oss-hadoop-fs    文件:InterDatanodeProtocolTranslatorPB.java   
/**
 * Client-side translator for initReplicaRecovery: marshals the block into
 * a protobuf request, performs the RPC, and unmarshals the reply.
 *
 * @param rBlock the block being recovered
 * @return recovery info for the remote replica, or null when the remote
 *         node has no replica of this block
 * @throws IOException if the RPC fails or the response is malformed
 */
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  final InitReplicaRecoveryRequestProto request =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock))
          .build();
  final InitReplicaRecoveryResponseProto response;
  try {
    response = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

  if (!response.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  }
  if (!response.hasBlock() || !response.hasState()) {
    throw new IOException("Replica was found but missing fields. " +
        "Req: " + request + "\n" +
        "Resp: " + response);
  }

  final BlockProto blockProto = response.getBlock();
  return new ReplicaRecoveryInfo(blockProto.getBlockId(),
      blockProto.getNumBytes(), blockProto.getGenStamp(),
      PBHelper.convert(response.getState()));
}
项目:aliyun-oss-hadoop-fs    文件:PBHelper.java   
/**
 * Serializes a RecoveringBlock (optionally a striped one) to its protobuf
 * form. Returns null for a null input.
 */
public static RecoveringBlockProto convert(RecoveringBlock b) {
  if (b == null) {
    return null;
  }
  RecoveringBlockProto.Builder builder = RecoveringBlockProto.newBuilder()
      .setBlock(PBHelperClient.convertLocatedBlock(b))
      .setNewGenStamp(b.getNewGenerationStamp());
  if (b.getNewBlock() != null) {
    builder.setTruncateBlock(PBHelperClient.convert(b.getNewBlock()));
  }
  if (b instanceof RecoveringStripedBlock) {
    // Striped blocks additionally carry the EC policy and block indices.
    RecoveringStripedBlock striped = (RecoveringStripedBlock) b;
    builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(
        striped.getErasureCodingPolicy()));
    builder.setBlockIndices(
        PBHelperClient.getByteString(striped.getBlockIndices()));
  }
  return builder.build();
}
项目:aliyun-oss-hadoop-fs    文件:PBHelper.java   
/**
 * Deserializes a RecoveringBlockProto back into a RecoveringBlock,
 * producing a RecoveringStripedBlock when an EC policy is present.
 */
public static RecoveringBlock convert(RecoveringBlockProto b) {
  LocatedBlock located = PBHelperClient.convertLocatedBlockProto(b.getBlock());
  final RecoveringBlock recovering;
  if (b.hasTruncateBlock()) {
    // Truncate recovery: recover toward the given target block.
    recovering = new RecoveringBlock(located.getBlock(),
        located.getLocations(), PBHelperClient.convert(b.getTruncateBlock()));
  } else {
    recovering = new RecoveringBlock(located.getBlock(),
        located.getLocations(), b.getNewGenStamp());
  }

  if (!b.hasEcPolicy()) {
    return recovering;
  }
  // EC policy implies the per-location block indices are also present.
  assert b.hasBlockIndices();
  return new RecoveringStripedBlock(recovering,
      b.getBlockIndices().toByteArray(),
      PBHelperClient.convertErasureCodingPolicy(b.getEcPolicy()));
}
项目:aliyun-oss-hadoop-fs    文件:InterDatanodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for initReplicaRecovery: unmarshals the request,
 * delegates to the implementation, and encodes the (possibly absent)
 * replica info in the response.
 */
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  final ReplicaRecoveryInfo info;
  try {
    info = impl.initReplicaRecovery(PBHelper.convert(request.getBlock()));
  } catch (IOException e) {
    throw new ServiceException(e);
  }

  final InitReplicaRecoveryResponseProto.Builder builder =
      InitReplicaRecoveryResponseProto.newBuilder();
  if (info == null) {
    // No replica on this node; block and state are intentionally unset.
    builder.setReplicaFound(false);
  } else {
    builder.setReplicaFound(true)
        .setBlock(PBHelperClient.convert(info))
        .setState(PBHelper.convert(info.getOriginalReplicaState()));
  }
  return builder.build();
}
项目:aliyun-oss-hadoop-fs    文件:TestInterDatanodeProtocol.java   
/** Test to verify that InterDatanode RPC timesout as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  // Server that accepts connections but presumably never answers the RPC
  // -- TODO confirm TestServer(1, true) semantics against its definition.
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // Proxy with a 500 ms socket timeout; the call below should throw
    // SocketTimeoutException, satisfying @Test(expected=...).
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    // Reached only if no exception was thrown -- fail explicitly.
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    // Always release the proxy and stop the server, pass or fail.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testRecoveryInProgressException()
  throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Make every initReplicaRecovery call fail with "already in progress".
  // spyDN and recoveryWorker are fixtures defined elsewhere in this class.
  doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));

  for(RecoveringBlock rBlock: initRecoveringBlocks()){
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask
        = spy(RecoveryTaskContiguous);
    spyTask.recover();
    // When recovery is already in progress, syncBlock must never run.
    verify(spyTask, never()).syncBlock(anyListOf(BlockRecord.class));
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_06. all datanodes throws an exception.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every initReplicaRecovery call fails, simulating all DNs erroring out.
  doThrow(new IOException()).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));

  for(RecoveringBlock rBlock: initRecoveringBlocks()){
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(RecoveryTaskContiguous);
    try {
      spyTask.recover();
      // recover() must not succeed when no DN could init recovery.
      fail();
    } catch(IOException e){
      GenericTestUtils.assertExceptionContains("All datanodes failed", e);
    }
    // And syncBlock must never have been attempted.
    verify(spyTask, never()).syncBlock(anyListOf(BlockRecord.class));
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every DN reports a FINALIZED replica of length 0 for this block.
  // spyDN and recoveryWorker are fixtures defined elsewhere in this class.
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));

  for(RecoveringBlock rBlock: initRecoveringBlocks()){
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask
        = spy(RecoveryTaskContiguous);
    spyTask.recover();
  }
  // The NN must then be told to commit the block with length 0.
  // NOTE(review): the two boolean flags are presumably closeFile /
  // deleteBlock -- confirm against DatanodeProtocol.
  DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
项目:aliyun-oss-hadoop-fs    文件:TestPBHelper.java   
@Test
public void testConvertBlockRecoveryCommand() {
  // Round-trip a BlockRecoveryCommand through its protobuf form and check
  // nothing is lost.
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };

  List<RecoveringBlock> blks = ImmutableList.of(
    new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
    new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
  );

  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  // Block ids must survive conversion, in order.
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());

  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);

  List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
      cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  // String forms compared as a cheap whole-object equality proxy.
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
项目:big-c    文件:DataNode.java   
/**
 * Kicks off asynchronous recovery of the given blocks on behalf of
 * {@code who} (used only for logging). Blocks are recovered one at a time
 * on a daemon thread; a failure on one block is logged and recovery moves
 * on to the next.
 *
 * @param who    name of the requester, for log messages
 * @param blocks the blocks to recover
 * @return the started daemon, so callers may join() it
 */
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {

  final Runnable task = new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock blk : blocks) {
        try {
          logRecoverBlock(who, blk);
          recoverBlock(blk);
        } catch (IOException ioe) {
          // One bad block must not stop recovery of the rest.
          LOG.warn("recoverBlocks FAILED: " + blk, ioe);
        }
      }
    }
  };
  final Daemon daemon = new Daemon(threadGroup, task);
  daemon.start();
  return daemon;
}
项目:big-c    文件:InterDatanodeProtocolTranslatorPB.java   
/**
 * Client-side translator for initReplicaRecovery: marshals the block into
 * a protobuf request, performs the RPC, and unmarshals the reply.
 *
 * @param rBlock the block being recovered
 * @return recovery info for the remote replica, or null when the remote
 *         node has no replica of this block
 * @throws IOException if the RPC fails or the response is malformed
 */
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  final InitReplicaRecoveryRequestProto request =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock))
          .build();
  final InitReplicaRecoveryResponseProto response;
  try {
    response = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

  if (!response.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  }
  if (!response.hasBlock() || !response.hasState()) {
    throw new IOException("Replica was found but missing fields. " +
        "Req: " + request + "\n" +
        "Resp: " + response);
  }

  final BlockProto blockProto = response.getBlock();
  return new ReplicaRecoveryInfo(blockProto.getBlockId(),
      blockProto.getNumBytes(), blockProto.getGenStamp(),
      PBHelper.convert(response.getState()));
}
项目:big-c    文件:InterDatanodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for initReplicaRecovery: unmarshals the request,
 * delegates to the implementation, and encodes the (possibly absent)
 * replica info in the response.
 */
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  final ReplicaRecoveryInfo info;
  try {
    info = impl.initReplicaRecovery(PBHelper.convert(request.getBlock()));
  } catch (IOException e) {
    throw new ServiceException(e);
  }

  final InitReplicaRecoveryResponseProto.Builder builder =
      InitReplicaRecoveryResponseProto.newBuilder();
  if (info == null) {
    // No replica on this node; block and state are intentionally unset.
    builder.setReplicaFound(false);
  } else {
    builder.setReplicaFound(true)
        .setBlock(PBHelper.convert(info))
        .setState(PBHelper.convert(info.getOriginalReplicaState()));
  }
  return builder.build();
}
项目:big-c    文件:TestInterDatanodeProtocol.java   
/** Test to verify that InterDatanode RPC timesout as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  // Server that accepts connections but presumably never answers the RPC
  // -- TODO confirm TestServer(1, true) semantics against its definition.
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // Proxy with a 500 ms socket timeout; the call below should throw
    // SocketTimeoutException, satisfying @Test(expected=...).
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    // Reached only if no exception was thrown -- fail explicitly.
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    // Always release the proxy and stop the server, pass or fail.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
项目:big-c    文件:TestBlockRecovery.java   
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  // NOTE(review): expectLen is not referenced in this body; presumably the
  // callers assert the resulting length themselves -- confirm.

  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block, 
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);

  // Stub both datanodes so updateReplicaUnderRecovery just reports a
  // storage ID instead of touching real replicas.
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
项目:big-c    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every DN reports a FINALIZED replica of length 0 for this block.
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  // The NN must then be told to commit the block with length 0.
  // NOTE(review): the two boolean flags are presumably closeFile /
  // deleteBlock -- confirm against DatanodeProtocol.
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
项目:big-c    文件:TestPBHelper.java   
@Test
public void testConvertBlockRecoveryCommand() {
  // Round-trip a BlockRecoveryCommand through its protobuf form and check
  // nothing is lost.
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };

  List<RecoveringBlock> blks = ImmutableList.of(
    new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
    new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
  );

  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  // Block ids must survive conversion, in order.
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());

  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);

  List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
      cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  // String forms compared as a cheap whole-object equality proxy.
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DataNode.java   
/**
 * Kicks off asynchronous recovery of the given blocks on behalf of
 * {@code who} (used only for logging). Blocks are recovered one at a time
 * on a daemon thread; a failure on one block is logged and recovery moves
 * on to the next.
 *
 * @param who    name of the requester, for log messages
 * @param blocks the blocks to recover
 * @return the started daemon, so callers may join() it
 */
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {

  final Runnable task = new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock blk : blocks) {
        try {
          logRecoverBlock(who, blk);
          recoverBlock(blk);
        } catch (IOException ioe) {
          // One bad block must not stop recovery of the rest.
          LOG.warn("recoverBlocks FAILED: " + blk, ioe);
        }
      }
    }
  };
  final Daemon daemon = new Daemon(threadGroup, task);
  daemon.start();
  return daemon;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:InterDatanodeProtocolTranslatorPB.java   
/**
 * Client-side translator for initReplicaRecovery: marshals the block into
 * a protobuf request, performs the RPC, and unmarshals the reply.
 *
 * @param rBlock the block being recovered
 * @return recovery info for the remote replica, or null when the remote
 *         node has no replica of this block
 * @throws IOException if the RPC fails or the response is malformed
 */
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  final InitReplicaRecoveryRequestProto request =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock))
          .build();
  final InitReplicaRecoveryResponseProto response;
  try {
    response = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

  if (!response.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  }
  if (!response.hasBlock() || !response.hasState()) {
    throw new IOException("Replica was found but missing fields. " +
        "Req: " + request + "\n" +
        "Resp: " + response);
  }

  final BlockProto blockProto = response.getBlock();
  return new ReplicaRecoveryInfo(blockProto.getBlockId(),
      blockProto.getNumBytes(), blockProto.getGenStamp(),
      PBHelper.convert(response.getState()));
}
项目:hops    文件:TestInterDatanodeProtocol.java   
/**
 * Test to verify that InterDatanode RPC timesout as expected when
 * the server DN does not respond.
 */
@Test(expected = SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  // Server that accepts connections but presumably never answers the RPC
  // -- TODO confirm TestServer(1, true) semantics against its definition.
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // Proxy with a 500 ms socket timeout; the call below should throw
    // SocketTimeoutException, satisfying @Test(expected = ...).
    proxy =
        DataNode.createInterDataNodeProtocolProxy(dInfo, conf, 500, false);
    proxy.initReplicaRecovery(
        new RecoveringBlock(new ExtendedBlock("bpid", 1), null, 100));
    // Reached only if no exception was thrown -- fail explicitly.
    fail("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    // Always release the proxy and stop the server, pass or fail.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:InterDatanodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for initReplicaRecovery: unmarshals the request,
 * delegates to the implementation, and encodes the (possibly absent)
 * replica info in the response.
 */
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  final ReplicaRecoveryInfo info;
  try {
    info = impl.initReplicaRecovery(PBHelper.convert(request.getBlock()));
  } catch (IOException e) {
    throw new ServiceException(e);
  }

  final InitReplicaRecoveryResponseProto.Builder builder =
      InitReplicaRecoveryResponseProto.newBuilder();
  if (info == null) {
    // No replica on this node; block and state are intentionally unset.
    builder.setReplicaFound(false);
  } else {
    builder.setReplicaFound(true)
        .setBlock(PBHelper.convert(info))
        .setState(PBHelper.convert(info.getOriginalReplicaState()));
  }
  return builder.build();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestInterDatanodeProtocol.java   
/** Test to verify that InterDatanode RPC timesout as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  // Server that accepts connections but presumably never answers the RPC
  // -- TODO confirm TestServer(1, true) semantics against its definition.
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // Proxy with a 500 ms socket timeout; the call below should throw
    // SocketTimeoutException, satisfying @Test(expected=...).
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    // Reached only if no exception was thrown -- fail explicitly.
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    // Always release the proxy and stop the server, pass or fail.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestBlockRecovery.java   
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  // NOTE(review): expectLen is not referenced in this body; presumably the
  // callers assert the resulting length themselves -- confirm.

  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block, 
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);

  // Stub both datanodes so updateReplicaUnderRecovery just reports a
  // storage ID instead of touching real replicas.
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
      anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
      anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every DN reports a FINALIZED replica of length 0 for this block.
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  // The NN must then be told to commit the block with length 0.
  // NOTE(review): the two boolean flags are presumably closeFile /
  // deleteBlock -- confirm against DatanodeProtocol.
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
项目:hops    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * Fix: the original wrapped the whole body in
 * {@code catch (Exception e) { e.printStackTrace(); }}, which swallowed
 * Mockito verification failures (and any other exception), so the test
 * could never fail. The catch is removed and exceptions now propagate.
 *
 * @throws IOException
 *     in case of an error
 */
@Test
public void testRecoveryInProgressException()
    throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Make every initReplicaRecovery call fail with "already in progress".
  DataNode spyDN = spy(dn);
  doThrow(
      new RecoveryInProgressException("Replica recovery is in progress")).
      when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  // When recovery is already in progress, syncBlock must never run.
  verify(spyDN, never())
      .syncBlock(any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
项目:hadoop-plus    文件:DataNode.java   
/**
 * Kicks off asynchronous recovery of the given blocks on behalf of
 * {@code who} (used only for logging). Blocks are recovered one at a time
 * on a daemon thread; a failure on one block is logged and recovery moves
 * on to the next.
 *
 * @param who    name of the requester, for log messages
 * @param blocks the blocks to recover
 * @return the started daemon, so callers may join() it
 */
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {

  final Runnable task = new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock blk : blocks) {
        try {
          logRecoverBlock(who, blk);
          recoverBlock(blk);
        } catch (IOException ioe) {
          // One bad block must not stop recovery of the rest.
          LOG.warn("recoverBlocks FAILED: " + blk, ioe);
        }
      }
    }
  };
  final Daemon daemon = new Daemon(threadGroup, task);
  daemon.start();
  return daemon;
}
项目:hadoop-plus    文件:InterDatanodeProtocolTranslatorPB.java   
/**
 * Client-side translator for initReplicaRecovery: marshals the block into
 * a protobuf request, performs the RPC, and unmarshals the reply.
 *
 * @param rBlock the block being recovered
 * @return recovery info for the remote replica, or null when the remote
 *         node has no replica of this block
 * @throws IOException if the RPC fails or the response is malformed
 */
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  final InitReplicaRecoveryRequestProto request =
      InitReplicaRecoveryRequestProto.newBuilder()
          .setBlock(PBHelper.convert(rBlock))
          .build();
  final InitReplicaRecoveryResponseProto response;
  try {
    response = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }

  if (!response.getReplicaFound()) {
    // No replica found on the remote node.
    return null;
  }
  if (!response.hasBlock() || !response.hasState()) {
    throw new IOException("Replica was found but missing fields. " +
        "Req: " + request + "\n" +
        "Resp: " + response);
  }

  final BlockProto blockProto = response.getBlock();
  return new ReplicaRecoveryInfo(blockProto.getBlockId(),
      blockProto.getNumBytes(), blockProto.getGenStamp(),
      PBHelper.convert(response.getState()));
}
项目:hops    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException
 *     in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every DN reports a FINALIZED replica of length 0 for this block.
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  // The NN must then be told to commit the block with length 0.
  // NOTE(review): the two boolean flags are presumably closeFile /
  // deleteBlock -- confirm against DatanodeProtocol.
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(block, RECOVERY_ID, 0, true, true,
      DatanodeID.EMPTY_ARRAY, null);
}
项目:hadoop-plus    文件:InterDatanodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translator for initReplicaRecovery: unmarshals the request,
 * delegates to the implementation, and encodes the (possibly absent)
 * replica info in the response.
 */
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
    RpcController unused, InitReplicaRecoveryRequestProto request)
    throws ServiceException {
  final ReplicaRecoveryInfo info;
  try {
    info = impl.initReplicaRecovery(PBHelper.convert(request.getBlock()));
  } catch (IOException e) {
    throw new ServiceException(e);
  }

  final InitReplicaRecoveryResponseProto.Builder builder =
      InitReplicaRecoveryResponseProto.newBuilder();
  if (info == null) {
    // No replica on this node; block and state are intentionally unset.
    builder.setReplicaFound(false);
  } else {
    builder.setReplicaFound(true)
        .setBlock(PBHelper.convert(info))
        .setState(PBHelper.convert(info.getOriginalReplicaState()));
  }
  return builder.build();
}
项目:hadoop-plus    文件:TestInterDatanodeProtocol.java   
/** Test to verify that InterDatanode RPC timesout as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  // Server that accepts connections but presumably never answers the RPC
  // -- TODO confirm TestServer(1, true) semantics against its definition.
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // Proxy with a 500 ms socket timeout; the call below should throw
    // SocketTimeoutException, satisfying @Test(expected=...).
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    // Reached only if no exception was thrown -- fail explicitly.
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    // Always release the proxy and stop the server, pass or fail.
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
项目:hadoop-plus    文件:TestBlockRecovery.java   
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  // NOTE(review): expectLen is not referenced in this body; presumably the
  // callers assert the resulting length themselves -- confirm.

  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block, 
      locs, RECOVERY_ID);
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);

  // Stub both datanodes so updateReplicaUnderRecovery just reports a
  // storage ID instead of touching real replicas.
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
      anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
      anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
项目:hops    文件:TestPBHelper.java   
@Test
public void testConvertBlockRecoveryCommand() {
  // Round-trip a BlockRecoveryCommand through its protobuf form and check
  // nothing is lost.
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[]{di1, di2};

  List<RecoveringBlock> blks = ImmutableList
      .of(new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
          new RecoveringBlock(getExtendedBlock(2), dnInfo, 3));

  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  // Block ids must survive conversion, in order.
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());

  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);

  List<RecoveringBlock> cmd2Blks =
      Lists.newArrayList(cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  // String forms compared as a cheap whole-object equality proxy.
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
项目:hadoop-plus    文件:TestBlockRecovery.java   
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  // Every DN reports a FINALIZED replica of length 0 for this block.
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  // The NN must then be told to commit the block with length 0.
  // NOTE(review): the two boolean flags are presumably closeFile /
  // deleteBlock -- confirm against DatanodeProtocol.
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}