Java class org.apache.hadoop.hdfs.server.protocol.StorageReport: usage examples from open-source projects

Project: hadoop    File: FSNamesystem.java
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time has passed since the last datanode
 * heartbeat, request an immediate block report.
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);

    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
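The snippets that follow assemble the StorageReport[] heartbeat payload in several ways. For reference, here is a minimal sketch of building one report per storage directory, the shape handleHeartbeat expects; the capacity figures are placeholders, and the single-argument DatanodeStorage(String) constructor is an assumption, not taken from the snippets on this page:

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

// Argument order: (storage, failed, capacity, dfsUsed, remaining, blockPoolUsed)
DatanodeStorage storage = new DatanodeStorage("DS-example-id");  // hypothetical storage ID
long capacity = 100L << 30;                  // 100 GiB, placeholder
long dfsUsed  =  20L << 30;                  // placeholder
StorageReport[] reports = {
    new StorageReport(storage, false, capacity, dfsUsed,
        capacity - dfsUsed,                  // remaining
        dfsUsed)                             // blockPoolUsed, placeholder
};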
Project: hadoop    File: BPServiceActor.java
HeartbeatResponse sendHeartBeat() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }

  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
Project: hadoop    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid)
    throws IOException {
  List<StorageReport> reports;
  synchronized (statsLock) {
    List<FsVolumeImpl> curVolumes = getVolumes();
    reports = new ArrayList<>(curVolumes.size());
    for (FsVolumeImpl volume : curVolumes) {
      try (FsVolumeReference ref = volume.obtainReference()) {
        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
            false,
            volume.getCapacity(),
            volume.getDfsUsed(),
            volume.getAvailable(),
            volume.getBlockPoolUsed(bpid));
        reports.add(sr);
      } catch (ClosedChannelException e) {
        continue;
      }
    }
  }

  return reports.toArray(new StorageReport[reports.size()]);
}
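A caller can roll the per-volume reports returned above into node-level totals. A short sketch, where dataset stands for any FsDatasetSpi implementation such as the FsDatasetImpl above, and the getters (isFailed, getCapacity, getDfsUsed, getRemaining) are assumed to mirror the constructor arguments used throughout this page:

StorageReport[] reports = dataset.getStorageReports(bpid);
long totalCapacity = 0, totalDfsUsed = 0, totalRemaining = 0;
for (StorageReport r : reports) {
  if (r.isFailed()) {
    continue;                                // skip volumes reported as failed
  }
  totalCapacity  += r.getCapacity();
  totalDfsUsed   += r.getDfsUsed();
  totalRemaining += r.getRemaining();
}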
Project: hadoop    File: TestDatanodeStartupFixesLegacyStorageIDs.java
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
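A test method then drives this helper against a packaged image; the case name below is hypothetical and stands in for the .tgz/.txt fixture pair that unpackStorage expects:

@Test
public void testUpgradeFixesLegacyStorageIDs() throws IOException {
  // Passing null for expectedStorageId only asserts that a valid,
  // GUID-based storage ID was generated during the upgrade.
  runLayoutUpgradeTest("testCaseWithLegacyIDs", null);
}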
Project: hadoop    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(storage, false,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
Project: hadoop    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: hadoop    File: TestBPOfferService.java
/**
 * Set up a mock NN with the bare minimum for a DN to register with it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();

  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),
        Mockito.anyLong(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class));
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
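HeartbeatAnswer is defined elsewhere in this test. A minimal sketch of such a Mockito Answer, returning a canned HeartbeatResponse that reflects the HA status recorded for the mocked NN; the three-argument HeartbeatResponse constructor follows the FSNamesystem snippet above, the rest is assumed:

private class HeartbeatAnswer implements Answer<HeartbeatResponse> {
  private final int nnIdx;

  HeartbeatAnswer(int nnIdx) {
    this.nnIdx = nnIdx;
  }

  @Override
  public HeartbeatResponse answer(InvocationOnMock invocation) {
    // Reply with no commands and the HA state configured in mockHaStatuses.
    return new HeartbeatResponse(new DatanodeCommand[0],
        mockHaStatuses[nnIdx], null /* no rolling upgrade in progress */);
  }
}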
Project: hadoop    File: TestDatanodeReport.java
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for(int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());

    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      //check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for(int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
                     computed[j].getStorage().getStorageID());
      }
    }
  }
}
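CMP is referenced but not shown. Since the assertions compare storage IDs pairwise after sorting both arrays, a comparator that orders reports by storage ID is the natural fit; this is an assumed reconstruction, not the verbatim definition:

static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
  @Override
  public int compare(StorageReport left, StorageReport right) {
    // Sort by storage ID so expected and computed arrays line up index by index.
    return left.getStorage().getStorageID()
        .compareTo(right.getStorage().getStorageID());
  }
};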
Project: aliyun-oss-hadoop-fs    File: BPServiceActor.java
HeartbeatResponse sendHeartBeat(boolean requestBlockReportLease)
    throws IOException {
  scheduler.scheduleNextHeartbeat();
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }

  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary,
      requestBlockReportLease);
}
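Relative to the hadoop branch above, this variant threads a requestBlockReportLease flag through to the NameNode. A hedged sketch of how the actor loop might decide the flag; both the predicate and its time argument are assumptions, not taken from this source:

// Ask for a full-block-report lease only when a report is actually due:
boolean requestLease = scheduler.isBlockReportDue(Time.monotonicNow());  // assumed helper
HeartbeatResponse resp = sendHeartBeat(requestLease);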
Project: aliyun-oss-hadoop-fs    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid)
    throws IOException {
  List<StorageReport> reports;
  synchronized (statsLock) {
    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
    reports = new ArrayList<>(curVolumes.size());
    for (FsVolumeImpl volume : curVolumes) {
      try (FsVolumeReference ref = volume.obtainReference()) {
        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
            false,
            volume.getCapacity(),
            volume.getDfsUsed(),
            volume.getAvailable(),
            volume.getBlockPoolUsed(bpid));
        reports.add(sr);
      } catch (ClosedChannelException e) {
        continue;
      }
    }
  }

  return reports.toArray(new StorageReport[reports.size()]);
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeStartupFixesLegacyStorageIDs.java
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
Project: aliyun-oss-hadoop-fs    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(storage, false,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: aliyun-oss-hadoop-fs    File: TestBPOfferService.java
/**
 * Set up a mock NN with the bare minimum for a DN to register with it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();

  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),
        Mockito.anyLong(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class),
        Mockito.anyBoolean());
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeReport.java
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for(int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());

    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      //check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for(int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
                     computed[j].getStorage().getStorageID());
      }
    }
  }
}
Project: big-c    File: FSNamesystem.java
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time has passed since the last datanode
 * heartbeat, request an immediate block report.
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);

    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
Project: big-c    File: BPServiceActor.java
HeartbeatResponse sendHeartBeat() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }

  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
Project: big-c    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid)
    throws IOException {
  List<StorageReport> reports;
  synchronized (statsLock) {
    List<FsVolumeImpl> curVolumes = getVolumes();
    reports = new ArrayList<>(curVolumes.size());
    for (FsVolumeImpl volume : curVolumes) {
      try (FsVolumeReference ref = volume.obtainReference()) {
        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
            false,
            volume.getCapacity(),
            volume.getDfsUsed(),
            volume.getAvailable(),
            volume.getBlockPoolUsed(bpid));
        reports.add(sr);
      } catch (ClosedChannelException e) {
        continue;
      }
    }
  }

  return reports.toArray(new StorageReport[reports.size()]);
}
Project: big-c    File: TestDatanodeStartupFixesLegacyStorageIDs.java
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
Project: big-c    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(storage, false,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
Project: big-c    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: big-c    File: TestBPOfferService.java
/**
 * Set up a mock NN with the bare minimum for a DN to register with it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();

  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),
        Mockito.anyLong(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class));
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
Project: big-c    File: TestDatanodeReport.java
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for(int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());

    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      //check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for(int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
                     computed[j].getStorage().getStorageID());
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time has passed since the last datanode
 * heartbeat, request an immediate block report.
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary);

    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
Project: hops    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
// keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep =
      {new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY,
          DF_USED, DF_CAPACITY - DF_USED, DF_USED)};
  DatanodeCommand[] cmds =
      nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0)
          .getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: BPServiceActor.java
HeartbeatResponse sendHeartBeat() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }

  VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
      .getVolumeFailureSummary();
  int numFailedVolumes = volumeFailureSummary != null ?
      volumeFailureSummary.getFailedStorageLocations().length : 0;
  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      numFailedVolumes,
      volumeFailureSummary);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FsDatasetImpl.java
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid)
    throws IOException {
  List<StorageReport> reports;
  synchronized (statsLock) {
    List<FsVolumeImpl> curVolumes = getVolumes();
    reports = new ArrayList<>(curVolumes.size());
    for (FsVolumeImpl volume : curVolumes) {
      try (FsVolumeReference ref = volume.obtainReference()) {
        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
            false,
            volume.getCapacity(),
            volume.getDfsUsed(),
            volume.getAvailable(),
            volume.getBlockPoolUsed(bpid));
        reports.add(sr);
      } catch (ClosedChannelException e) {
        continue;
      }
    }
  }

  return reports.toArray(new StorageReport[reports.size()]);
}
Project: hops    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep =
      {new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY,
          DF_USED, DF_CAPACITY - DF_USED, DF_USED)};
  DatanodeCommand[] cmds =
      nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0)
          .getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDatanodeStartupFixesLegacyStorageIDs.java
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
Project: hadoop-2.6.0-cdh5.4.3    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(storage, false,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBPOfferService.java
/**
 * Set up a mock NN with the bare minimum for a DN to register with it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();

  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(),
        Mockito.anyLong(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class));
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDatanodeReport.java
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for(int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());

    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      //check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for(int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
                     computed[j].getStorage().getStorageID());
      }
    }
  }
}
Project: hadoop-plus    File: BPServiceActor.java
HeartbeatResponse sendHeartBeat() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat from service actor: " + this);
  }
  // reports number of failed volumes
  StorageReport[] report = { new StorageReport(bpRegistration.getStorageID(),
      false,
      dn.getFSDataset().getCapacity(),
      dn.getFSDataset().getDfsUsed(),
      dn.getFSDataset().getRemaining(),
      dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId())) };
  return bpNamenode.sendHeartbeat(bpRegistration, report,
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      dn.getFSDataset().getNumFailedVolumes());
}
Project: hadoop-plus    File: DatanodeProtocolClientSideTranslatorPB.java
@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
    StorageReport[] reports, int xmitsInProgress, int xceiverCount,
    int failedVolumes) throws IOException {
  HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
      .setFailedVolumes(failedVolumes);
  for (StorageReport r : reports) {
    builder.addReports(PBHelper.convert(r));
  }

  HeartbeatResponseProto resp;
  try {
    resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
  int index = 0;
  for (DatanodeCommandProto p : resp.getCmdsList()) {
    cmds[index] = PBHelper.convert(p);
    index++;
  }
  return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()));
}
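PBHelper.convert(StorageReport) marshals each report into its protobuf counterpart. A plausible sketch for this older branch, where the report still carries a plain storage-ID string; the builder method names are assumed to mirror the report's fields, not copied from source:

// Assumed shape, not verbatim source:
public static StorageReportProto convert(StorageReport r) {
  return StorageReportProto.newBuilder()
      .setStorageUuid(r.getStorageID())      // String-keyed on this branch
      .setFailed(r.isFailed())
      .setCapacity(r.getCapacity())
      .setDfsUsed(r.getDfsUsed())
      .setRemaining(r.getRemaining())
      .setBlockPoolUsed(r.getBlockPoolUsed())
      .build();
}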
Project: hadoop-plus    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node.
 * Ignore reply commands.
 */
void sendHeartbeat() throws IOException {
  // register datanode
  // TODO:FEDERATION currently a single block pool is supported
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
      }
    }
  }
}
Project: hadoop-plus    File: NNThroughputBenchmark.java
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Project: hadoop-plus    File: TestBPOfferService.java
/**
 * Set up a mock NN with the bare minimum for a DN to register with it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
    throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();

  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
    .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));

  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
    .when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyInt(),
        Mockito.anyInt(),
        Mockito.anyInt());
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  return mock;
}
Project: FlexMap    File: FSNamesystem.java
/**
 * The given node has reported in.  This method should:
 * 1) Record the heartbeat, so the datanode isn't timed out
 * 2) Adjust usage stats for future block allocation
 * 
 * If a substantial amount of time has passed since the last datanode
 * heartbeat, request an immediate block report.
 * 
 * @return an array of datanode commands 
 * @throws IOException
 */
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xceiverCount, int xmitsInProgress, int failedVolumes)
      throws IOException {
  readLock();
  try {
    //get datanode commands
    final int maxTransfer = blockManager.getMaxReplicationStreams()
        - xmitsInProgress;
    DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
        nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed,
        xceiverCount, maxTransfer, failedVolumes);

    //create ha status
    final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat(
        haContext.getState().getServiceState(),
        getFSImage().getLastAppliedOrWrittenTxId());

    return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo);
  } finally {
    readUnlock();
  }
}
Project: FlexMap    File: BPServiceActor.java
HeartbeatResponse sendHeartBeat() throws IOException {
  StorageReport[] reports =
      dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending heartbeat with " + reports.length +
              " storage reports from service actor: " + this);
  }

  return bpNamenode.sendHeartbeat(bpRegistration,
      reports,
      dn.getFSDataset().getCacheCapacity(),
      dn.getFSDataset().getCacheUsed(),
      dn.getXmitsInProgress(),
      dn.getXceiverCount(),
      dn.getFSDataset().getNumFailedVolumes());
}
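Taken together, the branches collected here show DatanodeProtocol.sendHeartbeat growing one field at a time. A commented summary of the shapes that appear above, as a reference rather than code compilable against any single branch:

// hadoop-plus / hops (oldest shown):
//   sendHeartbeat(reg, StorageReport[] reports,
//                 int xmitsInProgress, int xceiverCount, int failedVolumes)
// FlexMap:
//   adds long cacheCapacity, long cacheUsed before the int counters
// hadoop / big-c / hadoop-2.6.0-cdh5.4.3:
//   adds VolumeFailureSummary volumeFailureSummary
// aliyun-oss-hadoop-fs (newest shown):
//   adds boolean requestBlockReportLease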