Java class org.apache.hadoop.hdfs.server.protocol.RegisterCommand — example source code

项目:hadoop    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  if (type == DatanodeCommandProto.Type.BlockIdCommand) {
    return PBHelper.convert(proto.getBlkIdCmd());
  }
  return null;
}
项目:aliyun-oss-hadoop-fs    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. This fork additionally understands the erasure
 * coding recovery command. Unmapped types yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  if (type == DatanodeCommandProto.Type.BlockIdCommand) {
    return PBHelper.convert(proto.getBlkIdCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockECRecoveryCommand) {
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  }
  return null;
}
项目:big-c    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  if (type == DatanodeCommandProto.Type.BlockIdCommand) {
    return PBHelper.convert(proto.getBlkIdCmd());
  }
  return null;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  if (type == DatanodeCommandProto.Type.BlockIdCommand) {
    return PBHelper.convert(proto.getBlkIdCmd());
  }
  return null;
}
项目:hadoop-plus    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  return null;
}
项目:FlexMap    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  if (type == DatanodeCommandProto.Type.BlockIdCommand) {
    return PBHelper.convert(proto.getBlkIdCmd());
  }
  return null;
}
项目:hops    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  return null;
}
项目:hadoop-TCP    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  return null;
}
项目:hardfs    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  return null;
}
项目:hadoop-on-lustre2    文件:PBHelper.java   
/**
 * Deserializes a wire-format {@code DatanodeCommandProto} into the matching
 * {@link DatanodeCommand}. Command types without a mapping yield {@code null}.
 */
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  DatanodeCommandProto.Type type = proto.getCmdType();
  if (type == DatanodeCommandProto.Type.BalancerBandwidthCommand) {
    return PBHelper.convert(proto.getBalancerCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockCommand) {
    return PBHelper.convert(proto.getBlkCmd());
  }
  if (type == DatanodeCommandProto.Type.BlockRecoveryCommand) {
    return PBHelper.convert(proto.getRecoveryCmd());
  }
  if (type == DatanodeCommandProto.Type.FinalizeCommand) {
    return PBHelper.convert(proto.getFinalizeCmd());
  }
  if (type == DatanodeCommandProto.Type.KeyUpdateCommand) {
    return PBHelper.convert(proto.getKeyUpdateCmd());
  }
  if (type == DatanodeCommandProto.Type.RegisterCommand) {
    return REG_CMD;  // singleton; carries no payload
  }
  if (type == DatanodeCommandProto.Type.BlockIdCommand) {
    return PBHelper.convert(proto.getBlkIdCmd());
  }
  return null;
}
项目:hadoop    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  // Cache and uncache directives carry only block IDs (BlockIdCommand).
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:aliyun-oss-hadoop-fs    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * This fork additionally encodes erasure-coding recovery commands. A
 * {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  // Cache and uncache directives carry only block IDs (BlockIdCommand).
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
    builder.setCmdType(DatanodeCommandProto.Type.BlockECRecoveryCommand)
        .setBlkECRecoveryCmd(
            convert((BlockECRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:aliyun-oss-hadoop-fs    文件:TestDeadDatanode.java   
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster.
 * - Shut down the datanode and wait for it to be marked dead at the namenode.
 * - Send datanode requests to the namenode and make sure each is rejected
 *   appropriately: incremental block reports are dropped (async, no
 *   exception), full block reports throw IOException, and heartbeats get a
 *   {@code RegisterCommand} back.
 */
@Test
public void testDeadDatanode() throws Exception {
  // Short recheck/heartbeat intervals so dead-node detection is fast.
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = 
    DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);

  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);

  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);

  DatanodeProtocol dnp = cluster.getNameNodeRpc();

  // One fake "block received" report attributed to the (now dead) node.
  ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
      new Block(0), 
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
      null) };
  StorageReceivedDeletedBlocks[] storageBlocks = { 
      new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };

  // Ensure blockReceived call from dead datanode is not rejected with
  // IOException, since it's async, but the node remains unregistered.
  dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  // IBRs are async, make sure the NN processes all of them.
  bm.flushBlockOps();
  assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());

  // Ensure blockReport from dead datanode is rejected with IOException
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      BlockListAsLongs.EMPTY) };
  try {
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L));
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // Expected
  }

  // Ensure heartbeat from dead datanode is rejected with a command
  // that asks datanode to register again
  StorageReport[] rep = { new StorageReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      false, 0, 0, 0, 0) };
  DatanodeCommand[] cmd =
      dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  // Exactly one command, and it must be the re-registration request.
  assertEquals(1, cmd.length);
  assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
      .getAction());
}
项目:big-c    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  // Cache and uncache directives carry only block IDs (BlockIdCommand).
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  // Cache and uncache directives carry only block IDs (BlockIdCommand).
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:hadoop-plus    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}. Note this fork has no
 * DNA_CACHE/DNA_UNCACHE mapping, so those actions fall through to the
 * default (null) encoding.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:FlexMap    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  // Cache and uncache directives carry only block IDs (BlockIdCommand).
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:hops    文件:DatanodeManager.java   
/**
 * Handle a heartbeat from a datanode and compute the commands to send back.
 *
 * Priority of replies, as implemented below: an unknown or dead node gets a
 * single {@code RegisterCommand}; a disallowed node is marked dead and a
 * {@link DisallowedDatanodeException} is thrown; otherwise lease-recovery
 * work preempts everything else, and failing that the reply batches
 * pending replication, block invalidation, key updates, and a balancer
 * bandwidth update.
 *
 * Locking: heartbeatManager is locked before datanodeMap — presumably the
 * established lock order elsewhere in this class; verify before changing.
 *
 * @param nodeReg identity of the reporting datanode
 * @param blockPoolId block pool this heartbeat applies to
 * @param capacity total storage capacity reported by the node
 * @param dfsUsed bytes used by HDFS on the node
 * @param remaining bytes still available on the node
 * @param blockPoolUsed bytes used by this block pool on the node
 * @param xceiverCount number of active transceivers on the node
 * @param maxTransfers cap on replication work handed out in this reply
 * @param failedVolumes number of failed storage volumes on the node
 * @return commands for the datanode; an empty array when there is nothing
 *         to do
 * @throws IOException including {@link DisallowedDatanodeException} when the
 *         node is no longer allowed to connect
 */
public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
    final String blockPoolId, long capacity, long dfsUsed, long remaining,
    long blockPoolUsed, int xceiverCount, int maxTransfers, int failedVolumes)
    throws IOException {
  synchronized (heartbeatManager) {
    synchronized (datanodeMap) {
      DatanodeDescriptor nodeinfo = null;
      try {
        nodeinfo = getDatanode(nodeReg);
      } catch (UnregisteredNodeException e) {
        // Unknown registration: tell the node to register again.
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      // Check if this datanode should actually be shutdown instead.
      if (nodeinfo != null && nodeinfo.isDisallowed()) {
        setDatanodeDead(nodeinfo);
        throw new DisallowedDatanodeException(nodeinfo);
      }

      // A node the namenode considers dead must re-register before it is
      // given any other work.
      if (nodeinfo == null || !nodeinfo.isAlive) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed, remaining,
          blockPoolUsed, xceiverCount, failedVolumes);

      //check lease recovery
      // Recovery preempts all other commands: if any blocks need lease
      // recovery, the reply is only the BlockRecoveryCommand.
      BlockInfoUnderConstruction[] blocks =
          nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
      if (blocks != null) {
        BlockRecoveryCommand brCommand =
            new BlockRecoveryCommand(blocks.length);
        for (BlockInfoUnderConstruction b : blocks) {
          brCommand.add(new RecoveringBlock(new ExtendedBlock(blockPoolId, b),
              getDataNodeDescriptorsTx(b), b.getBlockRecoveryId()));
        }
        return new DatanodeCommand[]{brCommand};
      }

      final List<DatanodeCommand> cmds = new ArrayList<>();
      //check pending replication
      List<BlockTargetPair> pendingList =
          nodeinfo.getReplicationCommand(maxTransfers);
      if (pendingList != null) {
        cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
            pendingList));
      }
      //check block invalidation
      Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
      if (blks != null) {
        cmds.add(
            new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId,
                blks));
      }

      blockManager.addKeyUpdateCommand(cmds, nodeinfo);

      // check for balancer bandwidth update
      if (nodeinfo.getBalancerBandwidth() > 0) {
        cmds.add(
            new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth()));
        // set back to 0 to indicate that datanode has been sent the new value
        nodeinfo.setBalancerBandwidth(0);
      }

      if (!cmds.isEmpty()) {
        return cmds.toArray(new DatanodeCommand[cmds.size()]);
      }
    }
  }

  return new DatanodeCommand[0];
}
项目:hops    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
          .setBalancerCmd(
              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
          .setKeyUpdateCmd(
              PBHelper.convert((KeyUpdateCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
          .setRecoveryCmd(
              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
          .setFinalizeCmd(
              PBHelper.convert((FinalizeCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_REGISTER:
      // Register carries no payload beyond the shared singleton proto.
      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
          .setRegisterCmd(REG_CMD_PROTO);
      break;
    // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
    case DatanodeProtocol.DNA_TRANSFER:
    case DatanodeProtocol.DNA_INVALIDATE:
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
          .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
    default:
      builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:hadoop-TCP    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:hardfs    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
项目:hadoop-on-lustre2    文件:PBHelper.java   
/**
 * Serializes a {@link DatanodeCommand} into its protobuf wire representation.
 * A {@code null} command, an unrecognized action, and {@code DNA_UNKNOWN} are
 * all encoded as {@code NullDatanodeCommand}; every other action maps to the
 * matching {@code DatanodeCommandProto.Type} plus its typed payload.
 */
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // Register carries no payload beyond the shared singleton proto.
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  // Transfer, invalidate and shutdown all travel as a BlockCommand payload.
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  // Cache and uncache directives carry only block IDs (BlockIdCommand).
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}