Java 类org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys 实例源码

项目:hadoop    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side PB translation of {@code getBlockKeys}: delegates to the
 * underlying NamenodeProtocol implementation and wraps the result in a
 * protobuf response. An IOException is surfaced as a ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // keys is an optional field: leave it unset when the server has none.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hadoop    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report.
 * NOTE(review): statement order mirrors the DN/NN handshake —
 * versionRequest, then registerDatanode, then blockReport.
 * @throws IOException if any namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Fresh UUID, default ports, and empty block keys (block tokens are
  // not exercised by the benchmark).
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
项目:aliyun-oss-hadoop-fs    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * PB translator for {@code getBlockKeys}: calls through to the wrapped
 * NamenodeProtocol and converts the result to its protobuf form,
 * mapping IOException onto ServiceException for the RPC layer.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Only populate the optional keys field when keys exist.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:aliyun-oss-hadoop-fs    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode and send its first (empty) block
 * report. NOTE(review): call order is the handshake protocol —
 * versionRequest, registerDatanode, blockReport — do not reorder.
 * @throws IOException if any RPC to the namenode fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Empty ExportedBlockKeys: security tokens are not under test here.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
          new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
项目:big-c    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * Translate the {@code getBlockKeys} RPC: fetch the keys from the
 * server implementation and package them as a protobuf response.
 * IOException from the server is rethrown as ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Absent keys → leave the optional field unset.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:big-c    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report.
 * NOTE(review): order matters — versionRequest must precede
 * registerDatanode, which must precede blockReport.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Register with default ports and empty block keys.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DataNode.java   
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 * @return a registration carrying this DN's identity, the pool's storage
 *         info, and empty block keys (real keys arrive from the NN later)
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo bpStorage = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (bpStorage == null) {
    // No per-pool storage exists for SimulatedDataSet; synthesize the
    // storage info from the namespace handshake instead.
    bpStorage = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }

  DatanodeID dnId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
      infoSecurePort, getIpcPort());
  return new DatanodeRegistration(dnId, bpStorage,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side translation of {@code getBlockKeys}: delegate to the
 * NamenodeProtocol implementation, convert the result to protobuf,
 * and map IOException to ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Optional field: set only when the server actually has keys.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report.
 * NOTE(review): versionRequest → registerDatanode → blockReport is the
 * required handshake order; keep these statements in sequence.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Empty ExportedBlockKeys: block tokens are not exercised.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @param targetStorageIDs per-block, per-target storage IDs to report
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][],
                            String targetStorageIDs[][]
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      String targetStorageID = targetStorageIDs[i][t];
      DatanodeRegistration receivedDNReg;
      // Impersonate the target DN: a throw-away registration with empty
      // block keys is sufficient for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          targetStorageID, rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:hadoop-plus    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * PB server-side handler for {@code getBlockKeys}: forwards to the
 * wrapped implementation and builds the protobuf response, converting
 * IOException into ServiceException for the RPC engine.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Leave the optional keys field unset when there are no keys.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hadoop-plus    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report. This older variant uses string storage
 * IDs (setNewStorageID) rather than datanode UUIDs.
 * NOTE(review): handshake order — versionRequest, registerDatanode,
 * blockReport — must be preserved.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Empty storage ID; assigned below by setNewStorageID.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
项目:hadoop-plus    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][] 
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      // Impersonate the target DN with a throw-away registration; empty
      // block keys are sufficient for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo, dnInfo.getStorageID()),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          receivedDNReg.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:FlexMap    文件:DataNode.java   
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (storageInfo == null) {
    // it's null in the case of SimulatedDataSet
    storageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }

  DatanodeID dnId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName, 
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
          infoSecurePort, getIpcPort());
  return new DatanodeRegistration(dnId, storageInfo, 
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
项目:FlexMap    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * Translate the {@code getBlockKeys} RPC to the server implementation
 * and wrap the returned keys in a protobuf response. IOException is
 * reported to the RPC layer as ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Optional keys field: only set when the implementation returned keys.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:FlexMap    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report.
 * NOTE(review): versionRequest → registerDatanode → blockReport is the
 * required handshake order.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Fresh UUID, default ports, and empty block keys.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
项目:FlexMap    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @param targetStorageIDs per-block, per-target storage IDs to report
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][],
                            String targetStorageIDs[][]
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      String targetStorageID = targetStorageIDs[i][t];
      DatanodeRegistration receivedDNReg;
      // Throw-away registration impersonating the target DN; empty
      // block keys suffice for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          targetStorageID, rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:hops    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side PB handler for {@code getBlockKeys}: obtains the keys
 * from the NamenodeProtocol implementation and returns them as a
 * protobuf message, wrapping IOException in ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Absent keys leave the optional field unset.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hops    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty, bucketed) block report.
 * NOTE(review): handshake order — versionRequest, registerDatanode,
 * blockReport — must be preserved.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Empty storage ID; assigned below by setNewStorageID.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"), "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""), new ExportedBlockKeys(),
      VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {new StorageBlockReport(storage,
      BlockReport.builder(NUM_BUCKETS).build())};
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
项目:hops    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks(Block blocks[], DatanodeInfo xferTargets[][])
    throws IOException {
  for (int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for (DatanodeInfo dnInfo : blockTargets) {
      DatanodeRegistration receivedDNReg;
      // Throw-away registration impersonating the target DN; empty
      // block keys suffice for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
          new DataStorage(nsInfo, dnInfo.getStorageID()),
          new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks =
          {new ReceivedDeletedBlockInfo(blocks[i],
              ReceivedDeletedBlockInfo.BlockStatus.RECEIVED, null)};
      StorageReceivedDeletedBlocks[] report =
          {new StorageReceivedDeletedBlocks(receivedDNReg.getStorageID(),
              rdBlocks)};
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg,
          nameNode.getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:hadoop-TCP    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * PB translation for {@code getBlockKeys}: delegates to the server
 * implementation, maps IOException to ServiceException, and packages
 * the keys into the protobuf response.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Only set the optional keys field when keys are present.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hadoop-TCP    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report; this variant uses string storage IDs
 * (setNewStorageID) rather than datanode UUIDs.
 * NOTE(review): keep the handshake order — versionRequest,
 * registerDatanode, blockReport.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Empty storage ID; assigned below by setNewStorageID.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
项目:hadoop-TCP    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][] 
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      // Impersonate the target DN with a throw-away registration; empty
      // block keys are sufficient for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo, dnInfo.getStorageID()),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          receivedDNReg.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:hardfs    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * Server-side PB translator for {@code getBlockKeys}: forwards the call
 * to the NamenodeProtocol implementation and builds a protobuf response;
 * IOException becomes ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Leave the optional keys field unset when no keys were returned.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hardfs    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report; string storage IDs are assigned via
 * setNewStorageID in this variant.
 * NOTE(review): handshake order must be preserved —
 * versionRequest, registerDatanode, blockReport.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Empty storage ID; assigned below by setNewStorageID.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
项目:hardfs    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][] 
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      // Throw-away registration impersonating the target DN; empty
      // block keys suffice for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo, dnInfo.getStorageID()),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          receivedDNReg.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:hadoop-on-lustre2    文件:NamenodeProtocolServerSideTranslatorPB.java   
/**
 * PB server-side handler for {@code getBlockKeys}: queries the wrapped
 * NamenodeProtocol and answers with a protobuf response, translating
 * IOException into ServiceException.
 */
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  final ExportedBlockKeys exportedKeys;
  try {
    exportedKeys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder response =
      GetBlockKeysResponseProto.newBuilder();
  // Optional field: set keys only when they exist.
  if (exportedKeys != null) {
    response.setKeys(PBHelper.convert(exportedKeys));
  }
  return response.build();
}
项目:hadoop-on-lustre2    文件:NNThroughputBenchmark.java   
/**
 * Register this simulated datanode with the namenode and send the
 * initial (empty) block report.
 * NOTE(review): versionRequest → registerDatanode → blockReport is the
 * required handshake order.
 * @throws IOException if a namenode RPC fails
 */
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  // Fresh UUID, default ports, and empty block keys.
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
项目:hadoop-on-lustre2    文件:NNThroughputBenchmark.java   
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 *
 * @param blocks blocks whose transfer is being simulated
 * @param xferTargets per-block list of destination datanodes
 * @param targetStorageIDs per-block, per-target storage IDs to report
 * @return the number of blocks processed
 * @throws IOException if the blockReceivedAndDeleted RPC fails
 */
private int transferBlocks( Block blocks[], 
                            DatanodeInfo xferTargets[][],
                            String targetStorageIDs[][]
                          ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for(int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      String targetStorageID = targetStorageIDs[i][t];
      DatanodeRegistration receivedDNReg;
      // Impersonate the target DN with a throw-away registration; empty
      // block keys suffice for this RPC.
      receivedDNReg = new DatanodeRegistration(dnInfo,
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          targetStorageID, rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
项目:hadoop    文件:KeyManager.java   
/**
 * Create a KeyManager for the given block pool: fetch the current block
 * keys from the namenode and, when block tokens are enabled, seed a
 * local BlockTokenSecretManager with them and prepare a BlockKeyUpdater
 * that re-syncs at a quarter of the NN's key-update interval.
 *
 * @param blockpoolID block pool the keys belong to
 * @param namenode source of the exported block keys
 * @param encryptDataTransfer whether data-transfer encryption is enabled
 * @param conf used to read the data-encryption algorithm
 * @throws IOException if fetching the keys from the namenode fails
 */
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
    boolean encryptDataTransfer, Configuration conf) throws IOException {
  this.namenode = namenode;
  this.encryptDataTransfer = encryptDataTransfer;

  final ExportedBlockKeys keys = namenode.getBlockKeys();
  this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
  if (isBlockTokenEnabled) {
    long updateInterval = keys.getKeyUpdateInterval();
    long tokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: update interval="
        + StringUtils.formatTime(updateInterval)
        + ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
    String encryptionAlgorithm = conf.get(
        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
    this.blockTokenSecretManager = new BlockTokenSecretManager(
        updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
    this.blockTokenSecretManager.addKeys(keys);

    // sync block keys with NN more frequently than NN updates its block keys
    this.blockKeyUpdater = new BlockKeyUpdater(updateInterval / 4);
    this.shouldRun = true;
  } else {
    // Tokens disabled: no secret manager or background updater needed.
    this.blockTokenSecretManager = null;
    this.blockKeyUpdater = null;
  }
}
项目:hadoop    文件:DatanodeRegistration.java   
/**
 * Build a registration record from a datanode identity plus the storage
 * info, exported block keys, and software version it reports to the NN.
 *
 * @param dn datanode identity; passed to the superclass
 * @param info storage info for the registering node
 * @param keys block keys exported by the namenode
 * @param softwareVersion software version string of the datanode
 */
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
    ExportedBlockKeys keys, String softwareVersion) {
  super(dn);
  this.softwareVersion = softwareVersion;
  this.exportedKeys = keys;
  this.storageInfo = info;
}
项目:hadoop    文件:DataNode.java   
/**
 * After the block pool has contacted the NN, registers that block pool
 * with the secret manager, updating it with the secrets provided by the NN.
 * @throws IOException on error
 */
private synchronized void registerBlockPoolWithSecretManager(
    DatanodeRegistration bpRegistration, String blockPoolId) throws IOException {
  ExportedBlockKeys keys = bpRegistration.getExportedKeys();
  // The first registered pool fixes the token-enabled flag for the
  // whole datanode; every later pool must agree with it.
  if (!hasAnyBlockPoolRegistered) {
    hasAnyBlockPoolRegistered = true;
    isBlockTokenEnabled = keys.isBlockTokenEnabled();
  } else {
    if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
      throw new RuntimeException("Inconsistent configuration of block access"
          + " tokens. Either all block pools must be configured to use block"
          + " tokens, or none may be.");
    }
  }
  if (!isBlockTokenEnabled) return;

  if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
    long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
    long blockTokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: for block pool " +
        blockPoolId + " keyUpdateInterval="
        + blockKeyUpdateInterval / (60 * 1000)
        + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
        + " min(s)");
    // NOTE(review): keyUpdateInterval is passed as 0 here — presumably
    // the DN never rotates keys itself and only receives them from the
    // NN; confirm against BlockTokenSecretManager's contract.
    final BlockTokenSecretManager secretMgr = 
        new BlockTokenSecretManager(0, blockTokenLifetime, blockPoolId,
            dnConf.encryptionAlgorithm);
    blockPoolTokenSecretManager.addBlockPool(blockPoolId, secretMgr);
  }
}
项目:hadoop    文件:PBHelper.java   
/**
 * Convert an {@link ExportedBlockKeys} instance to its protobuf form,
 * copying the token-enabled flag, timing parameters, current key, and
 * every key in the full key set.
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
  ExportedBlockKeysProto.Builder proto =
      ExportedBlockKeysProto.newBuilder();
  proto.setIsBlockTokenEnabled(keys.isBlockTokenEnabled());
  proto.setKeyUpdateInterval(keys.getKeyUpdateInterval());
  proto.setTokenLifeTime(keys.getTokenLifetime());
  proto.setCurrentKey(convert(keys.getCurrentKey()));
  for (BlockKey key : keys.getAllKeys()) {
    proto.addAllKeys(convert(key));
  }
  return proto.build();
}
项目:hadoop    文件:NamenodeProtocolTranslatorPB.java   
/**
 * Client-side PB translation of {@code getBlockKeys}: issues the RPC
 * and converts the response back to {@link ExportedBlockKeys}.
 *
 * @return the exported keys, or null when the response carries none
 * @throws IOException unwrapped from the RPC ServiceException
 */
@Override
public ExportedBlockKeys getBlockKeys() throws IOException {
  try {
    GetBlockKeysResponseProto response = rpcProxy.getBlockKeys(
        NULL_CONTROLLER, VOID_GET_BLOCKKEYS_REQUEST);
    if (!response.hasKeys()) {
      return null;
    }
    return PBHelper.convert(response.getKeys());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
项目:hadoop    文件:TestPBHelper.java   
/** Round-trips ExportedBlockKeys through its protobuf form and compares. */
@Test
public void testConvertExportedBlockKeys() {
  BlockKey[] allKeys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys original =
      new ExportedBlockKeys(true, 9, 10, getBlockKey(1), allKeys);
  ExportedBlockKeysProto proto = PBHelper.convert(original);
  ExportedBlockKeys roundTripped = PBHelper.convert(proto);
  compare(original, roundTripped);
}
项目:hadoop    文件:TestPBHelper.java   
/** Asserts two ExportedBlockKeys instances carry identical contents. */
void compare(ExportedBlockKeys expKeys, ExportedBlockKeys expKeys1) {
  // Same key set, compared pairwise in order.
  BlockKey[] first = expKeys.getAllKeys();
  BlockKey[] second = expKeys1.getAllKeys();
  assertEquals(first.length, second.length);
  for (int idx = 0; idx < first.length; idx++) {
    compare(first[idx], second[idx]);
  }
  // Same current key and timing parameters.
  compare(expKeys.getCurrentKey(), expKeys1.getCurrentKey());
  assertEquals(expKeys.getKeyUpdateInterval(),
      expKeys1.getKeyUpdateInterval());
  assertEquals(expKeys.getTokenLifetime(), expKeys1.getTokenLifetime());
}
项目:hadoop    文件:TestPBHelper.java   
/** Round-trips a DatanodeRegistration through protobuf and compares. */
@Test
public void testConvertDatanodeRegistration() {
  DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
  BlockKey[] allKeys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys exportedKeys =
      new ExportedBlockKeys(true, 9, 10, getBlockKey(1), allKeys);
  DatanodeRegistration original = new DatanodeRegistration(dnId,
      new StorageInfo(NodeType.DATA_NODE), exportedKeys, "3.0.0");
  DatanodeRegistrationProto proto = PBHelper.convert(original);
  DatanodeRegistration roundTripped = PBHelper.convert(proto);
  // Storage info, keys, identity, and version must all survive the trip.
  compare(original.getStorageInfo(), roundTripped.getStorageInfo());
  compare(original.getExportedKeys(), roundTripped.getExportedKeys());
  compare(original, roundTripped);
  assertEquals(original.getSoftwareVersion(),
      roundTripped.getSoftwareVersion());
}
项目:aliyun-oss-hadoop-fs    文件:KeyManager.java   
/**
 * Create a KeyManager for the given block pool: fetch the current block
 * keys from the namenode and, when block tokens are enabled, seed a
 * local BlockTokenSecretManager with them and prepare a BlockKeyUpdater
 * that re-syncs at a quarter of the NN's key-update interval.
 *
 * @param blockpoolID block pool the keys belong to
 * @param namenode source of the exported block keys
 * @param encryptDataTransfer whether data-transfer encryption is enabled
 * @param conf used to read the data-encryption algorithm
 * @throws IOException if fetching the keys from the namenode fails
 */
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
    boolean encryptDataTransfer, Configuration conf) throws IOException {
  this.namenode = namenode;
  this.encryptDataTransfer = encryptDataTransfer;

  final ExportedBlockKeys keys = namenode.getBlockKeys();
  this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
  if (isBlockTokenEnabled) {
    long updateInterval = keys.getKeyUpdateInterval();
    long tokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: update interval="
        + StringUtils.formatTime(updateInterval)
        + ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
    String encryptionAlgorithm = conf.get(
        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
    this.blockTokenSecretManager = new BlockTokenSecretManager(
        updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
    this.blockTokenSecretManager.addKeys(keys);

    // sync block keys with NN more frequently than NN updates its block keys
    this.blockKeyUpdater = new BlockKeyUpdater(updateInterval / 4);
    this.shouldRun = true;
  } else {
    // Tokens disabled: no secret manager or background updater needed.
    this.blockTokenSecretManager = null;
    this.blockKeyUpdater = null;
  }
}
项目:aliyun-oss-hadoop-fs    文件:DatanodeRegistration.java   
/**
 * Build a registration record from a datanode identity plus the storage
 * info, exported block keys, and software version reported to the NN.
 *
 * @param dn datanode identity; passed to the superclass
 * @param info storage info for the registering node
 * @param keys block keys exported by the namenode
 * @param softwareVersion software version string of the datanode
 */
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
    ExportedBlockKeys keys, String softwareVersion) {
  super(dn);
  this.softwareVersion = softwareVersion;
  this.exportedKeys = keys;
  this.storageInfo = info;
}
项目:aliyun-oss-hadoop-fs    文件:DataNode.java   
/**
 * After the block pool has contacted the NN, registers that block pool
 * with the secret manager, updating it with the secrets provided by the NN.
 * @throws IOException on error
 */
private synchronized void registerBlockPoolWithSecretManager(
    DatanodeRegistration bpRegistration, String blockPoolId) throws IOException {
  ExportedBlockKeys keys = bpRegistration.getExportedKeys();
  // The first registered pool fixes the token-enabled flag for the
  // whole datanode; every later pool must agree with it.
  if (!hasAnyBlockPoolRegistered) {
    hasAnyBlockPoolRegistered = true;
    isBlockTokenEnabled = keys.isBlockTokenEnabled();
  } else {
    if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
      throw new RuntimeException("Inconsistent configuration of block access"
          + " tokens. Either all block pools must be configured to use block"
          + " tokens, or none may be.");
    }
  }
  if (!isBlockTokenEnabled) return;

  if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
    long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
    long blockTokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: for block pool " +
        blockPoolId + " keyUpdateInterval="
        + blockKeyUpdateInterval / (60 * 1000)
        + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
        + " min(s)");
    // NOTE(review): keyUpdateInterval is passed as 0 here — presumably
    // the DN never rotates keys itself and only receives them from the
    // NN; confirm against BlockTokenSecretManager's contract.
    final BlockTokenSecretManager secretMgr = 
        new BlockTokenSecretManager(0, blockTokenLifetime, blockPoolId,
            dnConf.encryptionAlgorithm);
    blockPoolTokenSecretManager.addBlockPool(blockPoolId, secretMgr);
  }
}