Java class org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand example source code
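
BalancerBandwidthCommand is the DatanodeCommand that the NameNode returns in a heartbeat response to tell a DataNode how much bandwidth (in bytes per second) its balancing throttler may use; the snippets below show how the command is converted to and from its protobuf form and where it is produced and consumed. As a starting point, here is a minimal, hypothetical usage sketch (not taken from any of the listed projects):

import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class BalancerBandwidthCommandExample {
  public static void main(String[] args) {
    long bandwidthBytesPerSec = 10L * 1024 * 1024; // 10 MB/s, arbitrary value for illustration
    BalancerBandwidthCommand cmd = new BalancerBandwidthCommand(bandwidthBytesPerSec);

    // The action code is what DataNode#processCommand switches on (see the
    // DNA_BALANCERBANDWIDTHUPDATE case in the DataNode snippet further down).
    System.out.println(cmd.getAction() == DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE); // true
    System.out.println(cmd.getBalancerBandwidthValue()); // 10485760
  }
}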

Project: hadoop    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
Project: big-c    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: hadoop-plus    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Project: FlexMap    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: hops    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      return REG_CMD;
  }
  return null;
}
Project: hadoop-TCP    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Project: hardfs    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Project: hadoop-on-lustre2    File: PBHelper.java
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
Project: hadoop    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: hadoop    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: hadoop    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
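
The two single-argument converters above are inverses of each other. A minimal round-trip sketch, assuming the usual Hadoop 2.x package locations for PBHelper and the generated DatanodeProtocolProtos (adjust for your branch):

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;

public class BalancerBandwidthPbRoundTrip {
  public static void main(String[] args) {
    BalancerBandwidthCommand original = new BalancerBandwidthCommand(50L * 1024 * 1024);

    // Command -> protobuf: only the bandwidth field is carried on the wire.
    BalancerBandwidthCommandProto proto = PBHelper.convert(original);

    // Protobuf -> command: reconstructs an equivalent BalancerBandwidthCommand.
    BalancerBandwidthCommand decoded = PBHelper.convert(proto);
    System.out.println(decoded.getBalancerBandwidthValue() == original.getBalancerBandwidthValue()); // true
  }
}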
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
    builder.setCmdType(DatanodeCommandProto.Type.BlockECRecoveryCommand)
        .setBlkECRecoveryCmd(
            convert((BlockECRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: aliyun-oss-hadoop-fs    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: big-c    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: big-c    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: big-c    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: hadoop-2.6.0-cdh5.4.3    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: hadoop-plus    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: hadoop-plus    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: hadoop-plus    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: FlexMap    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: FlexMap    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: FlexMap    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: hops    File: DatanodeManager.java
/**
 * Handle heartbeat from datanodes.
 */
public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
    final String blockPoolId, long capacity, long dfsUsed, long remaining,
    long blockPoolUsed, int xceiverCount, int maxTransfers, int failedVolumes)
    throws IOException {
  synchronized (heartbeatManager) {
    synchronized (datanodeMap) {
      DatanodeDescriptor nodeinfo = null;
      try {
        nodeinfo = getDatanode(nodeReg);
      } catch (UnregisteredNodeException e) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      // Check if this datanode should actually be shutdown instead. 
      if (nodeinfo != null && nodeinfo.isDisallowed()) {
        setDatanodeDead(nodeinfo);
        throw new DisallowedDatanodeException(nodeinfo);
      }

      if (nodeinfo == null || !nodeinfo.isAlive) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed, remaining,
          blockPoolUsed, xceiverCount, failedVolumes);

      //check lease recovery
      BlockInfoUnderConstruction[] blocks =
          nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
      if (blocks != null) {
        BlockRecoveryCommand brCommand =
            new BlockRecoveryCommand(blocks.length);
        for (BlockInfoUnderConstruction b : blocks) {
          brCommand.add(new RecoveringBlock(new ExtendedBlock(blockPoolId, b),
              getDataNodeDescriptorsTx(b), b.getBlockRecoveryId()));
        }
        return new DatanodeCommand[]{brCommand};
      }

      final List<DatanodeCommand> cmds = new ArrayList<>();
      //check pending replication
      List<BlockTargetPair> pendingList =
          nodeinfo.getReplicationCommand(maxTransfers);
      if (pendingList != null) {
        cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
            pendingList));
      }
      //check block invalidation
      Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
      if (blks != null) {
        cmds.add(
            new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId,
                blks));
      }

      blockManager.addKeyUpdateCommand(cmds, nodeinfo);

      // check for balancer bandwidth update
      if (nodeinfo.getBalancerBandwidth() > 0) {
        cmds.add(
            new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth()));
        // set back to 0 to indicate that datanode has been sent the new value
        nodeinfo.setBalancerBandwidth(0);
      }

      if (!cmds.isEmpty()) {
        return cmds.toArray(new DatanodeCommand[cmds.size()]);
      }
    }
  }

  return new DatanodeCommand[0];
}
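
The nodeinfo.getBalancerBandwidth() value consumed above is normally set by an administrator, for example with hdfs dfsadmin -setBalancerBandwidth <bytes per second>. A minimal client-side sketch, assuming the DistributedFileSystem#setBalancerBandwidth API (which is not part of the snippets on this page):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetBalancerBandwidthExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster, so FileSystem.get returns a DistributedFileSystem.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // On its next heartbeat each DataNode receives a BalancerBandwidthCommand carrying
    // this value (see handleHeartbeat above); the NameNode then resets the per-node
    // value to 0 so the command is sent only once.
    dfs.setBalancerBandwidth(100L * 1024 * 1024);
    dfs.close();
  }
}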
Project: hops    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: hops    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
          .setBalancerCmd(
              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
          .setKeyUpdateCmd(
              PBHelper.convert((KeyUpdateCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
          .setRecoveryCmd(
              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
          .setFinalizeCmd(
              PBHelper.convert((FinalizeCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_REGISTER:
      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
          .setRegisterCmd(REG_CMD_PROTO);
      break;
    case DatanodeProtocol.DNA_TRANSFER:
    case DatanodeProtocol.DNA_INVALIDATE:
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
          .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
    default:
      builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: hops    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: hadoop-TCP    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: hadoop-TCP    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: hadoop-TCP    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
Project: hadoop-on-lustre    File: FSNamesystem.java
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time passed since the last datanode 
 * heartbeat then request an immediate block report.  
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
    long capacity, long dfsUsed, long remaining,
    int xceiverCount, int xmitsInProgress) throws IOException {
  DatanodeCommand cmd = null;
  synchronized (heartbeats) {
    synchronized (datanodeMap) {
      DatanodeDescriptor nodeinfo = null;
      try {
        nodeinfo = getDatanode(nodeReg);
      } catch(UnregisteredDatanodeException e) {
        return new DatanodeCommand[]{DatanodeCommand.REGISTER};
      }

      // Check if this datanode should actually be shutdown instead. 
      if (nodeinfo != null && shouldNodeShutdown(nodeinfo)) {
        setDatanodeDead(nodeinfo);
        throw new DisallowedDatanodeException(nodeinfo);
      }

      if (nodeinfo == null || !nodeinfo.isAlive) {
        return new DatanodeCommand[]{DatanodeCommand.REGISTER};
      }

      updateStats(nodeinfo, false);
      nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
      updateStats(nodeinfo, true);

      //check lease recovery
      cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
      if (cmd != null) {
        return new DatanodeCommand[] {cmd};
      }

      ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
      //check pending replication
      cmd = nodeinfo.getReplicationCommand(
            maxReplicationStreams - xmitsInProgress);
      if (cmd != null) {
        cmds.add(cmd);
      }
      //check block invalidation
      cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
      if (cmd != null) {
        cmds.add(cmd);
      }
      // check access key update
      if (isAccessTokenEnabled && nodeinfo.needKeyUpdate) {
        cmds.add(new KeyUpdateCommand(accessTokenHandler.exportKeys()));
        nodeinfo.needKeyUpdate = false;
      }
      // check for balancer bandwidth update
      if (nodeinfo.getBalancerBandwidth() > 0) {
        cmds.add(new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth()));
        // set back to 0 to indicate that datanode has been sent the new value
        nodeinfo.setBalancerBandwidth(0);
      }
      if (!cmds.isEmpty()) {
        return cmds.toArray(new DatanodeCommand[cmds.size()]);
      }
    }
  }

  //check distributed upgrade
  cmd = getDistributedUpgradeCommand();
  if (cmd != null) {
    return new DatanodeCommand[] {cmd};
  }
  return null;
}
Project: hadoop-on-lustre    File: DataNode.java
/**
 * @param cmd
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd) throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null;

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.incrBlocksReplicated(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block toDelete[] = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(toDelete);
      }
      data.invalidate(toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.incrBlocksRemoved(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    this.shutdown();
    return false;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizeUpgrade();
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(bcmd.getBlocks(), bcmd.getTargets());
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE");
    if (isBlockTokenEnabled) {
      blockTokenSecretManager.setKeys(((KeyUpdateCommand) cmd).getExportedKeys());
    }
    break;
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    LOG.info("DatanodeCommand action: DNA_BALANCERBANDWIDTHUPDATE");
    int vsn = ((BalancerBandwidthCommand) cmd).getBalancerBandwidthVersion();
    if (vsn >= 1) {
      long bandwidth = 
                 ((BalancerBandwidthCommand) cmd).getBalancerBandwidthValue();
      if (bandwidth > 0) {
        DataXceiverServer dxcs =
                     (DataXceiverServer) this.dataXceiverServer.getRunnable();
        dxcs.balanceThrottler.setBandwidth(bandwidth);
      }
    }
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
Project: hardfs    File: PBHelper.java
public static BalancerBandwidthCommandProto convert(
    BalancerBandwidthCommand bbCmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
Project: hardfs    File: PBHelper.java
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Project: hardfs    File: PBHelper.java
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
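
As the convert(DatanodeCommand) and convert(DatanodeCommandProto) pairs above show, the heartbeat response wraps the balancer command in a DatanodeCommandProto whose cmdType field tags the payload. A minimal wrapping round-trip sketch, again assuming Hadoop 2.x package locations:

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;

public class DatanodeCommandWrappingExample {
  public static void main(String[] args) {
    DatanodeCommand cmd = new BalancerBandwidthCommand(20L * 1024 * 1024);

    // Wrap: cmdType is set to Type.BalancerBandwidthCommand and balancerCmd carries the bandwidth.
    DatanodeCommandProto proto = PBHelper.convert(cmd);
    System.out.println(proto.getCmdType()); // BalancerBandwidthCommand

    // Unwrap: the switch on cmdType dispatches back to the balancer converter.
    BalancerBandwidthCommand decoded = (BalancerBandwidthCommand) PBHelper.convert(proto);
    System.out.println(decoded.getBalancerBandwidthValue()); // 20971520
  }
}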