Example source code for the Java class org.apache.hadoop.hdfs.protocol.BlockListAsLongs

Project: hadoop    File: TestDFSShell.java
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
Project: hadoop    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
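
The first report above is just BlockListAsLongs.EMPTY, the shared empty-report singleton used once block reports became buffer-based. A minimal inspection sketch, assuming the 2.7-era API shown in this snippet; the exact {0, 0} legacy header is an assumption, not taken from this listing:

// Sketch only: what the empty report used by register() exposes.
BlockListAsLongs empty = BlockListAsLongs.EMPTY;
assert empty.getNumberOfBlocks() == 0;        // no replicas reported
long[] legacy = empty.getBlockListAsLongs();  // legacy encoding; assumed to be
                                              // {finalized count, under-construction count} = {0, 0}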
Project: hadoop    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Project: hadoop    File: TestDnRespectsBlockReportSplitThreshold.java
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
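
verifyCapturedArguments assumes the captor was already filled by a Mockito verify call against a spied NameNode endpoint. A hedged setup sketch; nnSpy and expectedCalls are hypothetical names, not from this listing:

// Hypothetical wiring: capture every reports array passed to blockReport().
ArgumentCaptor<StorageBlockReport[]> captor =
    ArgumentCaptor.forClass(StorageBlockReport[].class);
Mockito.verify(nnSpy, Mockito.times(expectedCalls)).blockReport(
    Mockito.any(DatanodeRegistration.class),
    Mockito.anyString(),
    captor.capture(),
    Mockito.any(BlockReportContext.class));
verifyCapturedArguments(captor, expectedReportsPerCall, expectedTotalBlockCount);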
Project: aliyun-oss-hadoop-fs    File: TestDFSShell.java
private static List<MaterializedReplica> getMaterializedReplicas(
    MiniDFSCluster cluster) throws IOException {
  List<MaterializedReplica> replicas = new ArrayList<>();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks =
      cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        replicas.add(cluster.getMaterializedReplica(i,
            new ExtendedBlock(poolId, b)));
      }
    }
  }
  return replicas;
}
Project: aliyun-oss-hadoop-fs    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
          new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
Project: aliyun-oss-hadoop-fs    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.setAlive(true);

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Project: aliyun-oss-hadoop-fs    File: TestDnRespectsBlockReportSplitThreshold.java
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
Project: big-c    File: TestDFSShell.java
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
Project: big-c    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
Project: big-c    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Project: big-c    File: TestDnRespectsBlockReportSplitThreshold.java
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDFSShell.java
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
Project: hadoop-2.6.0-cdh5.4.3    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDnRespectsBlockReportSplitThreshold.java
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = new BlockListAsLongs(report.getBlocks());
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
Project: hadoop-EAR    File: IncrementalBlockReport.java
public IncrementalBlockReport(Block[] blocks) {

    currentBlock = 0;
    currentHint = 0;

    if (blocks == null || blocks.length == 0) {
      this.delHintsMap = LightWeightBitSet.getBitSet(0);
      this.delHints = new String[0];
      this.blocks = new long[0];
      return;
    }
    this.delHintsMap = LightWeightBitSet.getBitSet(blocks.length);

    ArrayList<String> hints = new ArrayList<String>(0);

    for (int i = 0; i < blocks.length; i++) {
      Block b = blocks[i];
      if (b instanceof ReceivedBlockInfo) {
        ReceivedBlockInfo rbi = (ReceivedBlockInfo) b;
        hints.add(rbi.getDelHints());
        LightWeightBitSet.set(delHintsMap, i);
      }
    }
    this.delHints = hints.toArray(new String[hints.size()]);
    this.blocks = BlockListAsLongs.convertToArrayLongs(blocks);
  }
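
convertToArrayLongs flattens the Block[] into a plain long array. In these 0.20-derived branches the layout is, as far as I can tell, three longs per block with no header; a hedged read-back sketch:

// Hedged sketch: decoding the assumed flat triplet layout.
long[] longs = BlockListAsLongs.convertToArrayLongs(blocks);
for (int i = 0; i < longs.length / 3; i++) {
  long blockId  = longs[3 * i];      // block id
  long numBytes = longs[3 * i + 1];  // length in bytes
  long genStamp = longs[3 * i + 2];  // generation stamp
}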
Project: hadoop-plus    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + " " + blist.getNumberOfBlocks()
         + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
    return new FinalizeCommand(poolId);
  return null;
}
Project: hadoop-plus    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Project: hadoop-plus    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  node.setStorageID("dummy-storage");
  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertTrue(node.isFirstBlockReport());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, "pool", new BlockListAsLongs(null, null));
  verify(node).receivedBlockReport();
  assertFalse(node.isFirstBlockReport());
}
Project: hadoop-plus    File: TestBlockReport.java
/**
 * Test creates a file and closes it.
 * A second datanode is then started in the cluster.
 * As soon as the replication process completes, the test runs a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(dnR.getStorageID()),
      new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
  cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Project: FlexMap    File: TestDFSShell.java
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
Project: FlexMap    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Project: FlexMap    File: TestDnRespectsBlockReportSplitThreshold.java
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = new BlockListAsLongs(report.getBlocks());
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
Project: hadoop-TCP    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + " " + blist.getNumberOfBlocks()
         + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
    return new FinalizeCommand(poolId);
  return null;
}
Project: hadoop-TCP    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  node.setStorageID("dummy-storage");
  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertTrue(node.isFirstBlockReport());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, "pool", new BlockListAsLongs(null, null));
  verify(node).receivedBlockReport();
  assertFalse(node.isFirstBlockReport());
}
Project: hadoop-TCP    File: TestBlockReport.java
/**
 * Test creates a file and closes it.
 * A second datanode is then started in the cluster.
 * As soon as the replication process completes, the test runs a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(dnR.getStorageID()),
      new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
  cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Project: hardfs    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + " " + blist.getNumberOfBlocks()
         + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
    return new FinalizeCommand(poolId);
  return null;
}
Project: hardfs    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  node.setStorageID("dummy-storage");
  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertTrue(node.isFirstBlockReport());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, "pool", new BlockListAsLongs(null, null));
  verify(node).receivedBlockReport();
  assertFalse(node.isFirstBlockReport());
}
Project: hardfs    File: TestBlockReport.java
/**
 * Test creates a file and closes it.
 * A second datanode is then started in the cluster.
 * As soon as the replication process completes, the test runs a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(dnR.getStorageID()),
      new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
  cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Project: hadoop-on-lustre2    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean hasStaleStorages = true;
  for(StorageBlockReport r : reports) {
    final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
    hasStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !nn.isStandbyState() &&
      !hasStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
Project: hadoop-on-lustre2    File: TestDFSShell.java
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
Project: hadoop-on-lustre2    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Project: hadoop-on-lustre2    File: TestDnRespectsBlockReportSplitThreshold.java
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = new BlockListAsLongs(report.getBlocks());
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
Project: cumulus    File: TestBlockReport.java
/**
 * Test creates a file and closes it.
 * A second datanode is then started in the cluster.
 * As soon as the replication process completes, the test runs a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test
public void blockReport_06() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  cluster.getNameNode().blockReport(
    cluster.getDataNodes().get(DN_N1).dnRegistration,
    new BlockListAsLongs(blocks, null).getBlockListAsLongs());
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
    0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Project: RDFS    File: IncrementalBlockReport.java
public IncrementalBlockReport(Block[] blocks) {

    currentBlock = 0;
    currentHint = 0;

    if (blocks == null || blocks.length == 0) {
      this.delHintsMap = LightWeightBitSet.getBitSet(0);
      this.delHints = new String[0];
      this.blocks = new long[0];
      return;
    }
    this.delHintsMap = LightWeightBitSet.getBitSet(blocks.length);

    ArrayList<String> hints = new ArrayList<String>(0);

    for (int i = 0; i < blocks.length; i++) {
      Block b = blocks[i];
      if (b instanceof ReceivedBlockInfo) {
        ReceivedBlockInfo rbi = (ReceivedBlockInfo) b;
        hints.add(rbi.getDelHints());
        LightWeightBitSet.set(delHintsMap, i);
      }
    }
    this.delHints = hints.toArray(new String[hints.size()]);
    this.blocks = BlockListAsLongs.convertToArrayLongs(blocks);
  }
Project: hadoop    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports,
      BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorages.
    //
    noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
        blocks, context, (r == reports.length - 1));
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
Project: hadoop    File: DatanodeProtocolClientSideTranslatorPB.java
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);

  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
Project: hadoop    File: DatanodeProtocolServerSideTranslatorPB.java
@Override
public BlockReportResponseProto blockReport(RpcController controller,
    BlockReportRequestProto request) throws ServiceException {
  DatanodeCommand cmd = null;
  StorageBlockReport[] report = 
      new StorageBlockReport[request.getReportsCount()];

  int index = 0;
  for (StorageBlockReportProto s : request.getReportsList()) {
    final BlockListAsLongs blocks;
    if (s.hasNumberOfBlocks()) { // new style buffer based reports
      int num = (int)s.getNumberOfBlocks();
      Preconditions.checkState(s.getBlocksCount() == 0,
          "cannot send both blocks list and buffers");
      blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
    } else {
      blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
    }
    report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
        blocks);
  }
  try {
    cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), report,
        request.hasContext() ?
            PBHelper.convert(request.getContext()) : null);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  BlockReportResponseProto.Builder builder = 
      BlockReportResponseProto.newBuilder();
  if (cmd != null) {
    builder.setCmd(PBHelper.convert(cmd));
  }
  return builder.build();
}
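
decodeLongs consumes the pre-buffer "longs" encoding which, unlike the headerless triplets of the 0.20-era branches above, starts with a two-long header. A self-contained sketch, assuming the 2.7-era layout of [finalized count, under-construction count] followed by (id, length, generation stamp) triplets; the sample values are invented:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

// Hedged sketch: hand-building a legacy-format report and decoding it.
static void decodeLegacyReportSketch() {
  List<Long> longs = Arrays.asList(
      2L, 0L,               // header: 2 finalized replicas, 0 under construction
      1001L, 1024L, 100L,   // triplet: block id, length, generation stamp
      1002L, 2048L, 100L);
  BlockListAsLongs blocks = BlockListAsLongs.decodeLongs(longs);
  assert blocks.getNumberOfBlocks() == 2;
  for (BlockReportReplica replica : blocks) {
    System.out.println(replica.getBlockId() + " len=" + replica.getNumBytes());
  }
}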
Project: hadoop    File: MiniDFSCluster.java
/**
 * @param dataNodeIndex - the data node whose block report is desired; the
 *                        index is the same as for getDataNodes()
 * @return the block report for the specified data node
 */
public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
Project: hadoop    File: MiniDFSCluster.java
/**
 * @return block reports from all data nodes, indexed in the same order
 *         as the list of datanodes returned by getDataNodes()
 */
public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
  int numDataNodes = dataNodes.size();
  final List<Map<DatanodeStorage, BlockListAsLongs>> result
      = new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
  for (int i = 0; i < numDataNodes; ++i) {
    result.add(getBlockReport(bpid, i));
  }
  return result;
}
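
Putting the two MiniDFSCluster helpers together, a hypothetical usage sketch that totals every replica the cluster currently reports (cluster is an already-running MiniDFSCluster; the snippet is illustrative, not from this listing):

// Hypothetical usage: count all replicas reported across the whole cluster.
String bpid = cluster.getNamesystem().getBlockPoolId();
int totalBlocks = 0;
for (Map<DatanodeStorage, BlockListAsLongs> perDatanode :
         cluster.getAllBlockReports(bpid)) {
  for (BlockListAsLongs storageReport : perDatanode.values()) {
    totalBlocks += storageReport.getNumberOfBlocks();
  }
}
System.out.println("cluster reports " + totalBlocks + " replicas");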