/**
 * Build one {@link DatanodeStorageReport} per datanode selected by
 * {@code type}, each pairing the node's info with its per-storage reports.
 *
 * Requires superuser privilege. checkOperation is invoked both before and
 * after taking the read lock (standard HA standby-state check pattern).
 *
 * @param type which datanodes to report on (ALL, LIVE, DEAD, ...)
 * @return one report per selected datanode
 * @throws AccessControlException if the caller is not a superuser
 * @throws StandbyException if this namenode cannot serve the operation
 */
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    // Re-check under the lock: HA state may have changed since the
    // unlocked check above.
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager manager = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> selected =
        manager.getDatanodeListForReport(type);
    final DatanodeStorageReport[] result =
        new DatanodeStorageReport[selected.size()];
    int idx = 0;
    for (DatanodeDescriptor node : selected) {
      result[idx++] = new DatanodeStorageReport(
          new DatanodeInfo(node), node.getStorageReports());
    }
    return result;
  } finally {
    readUnlock();
  }
}
/** Get live datanode storage reports and then build the network topology. */ public List<DatanodeStorageReport> init() throws IOException { final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport(); final List<DatanodeStorageReport> trimmed = new ArrayList<DatanodeStorageReport>(); // create network topology and classify utilization collections: // over-utilized, above-average, below-average and under-utilized. for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) { final DatanodeInfo datanode = r.getDatanodeInfo(); if (shouldIgnore(datanode)) { continue; } trimmed.add(r); cluster.add(datanode); } return trimmed; }
static void assertReports(int numDatanodes, DatanodeReportType type, DFSClient client, List<DataNode> datanodes, String bpid) throws IOException { final DatanodeInfo[] infos = client.datanodeReport(type); assertEquals(numDatanodes, infos.length); final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type); assertEquals(numDatanodes, reports.length); for(int i = 0; i < infos.length; i++) { assertEquals(infos[i], reports[i].getDatanodeInfo()); final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes); if (bpid != null) { //check storage final StorageReport[] computed = reports[i].getStorageReports(); Arrays.sort(computed, CMP); final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid); Arrays.sort(expected, CMP); assertEquals(expected.length, computed.length); for(int j = 0; j < expected.length; j++) { assertEquals(expected[j].getStorage().getStorageID(), computed[j].getStorage().getStorageID()); } } } }
/**
 * Compare the total blockpool usage on each datanode to ensure that nothing
 * was balanced.
 *
 * @param preReports storage reports from pre balancer run
 * @param postReports storage reports from post balancer run
 */
private static void compareTotalPoolUsage(DatanodeStorageReport[] preReports,
    DatanodeStorageReport[] postReports) {
  Assert.assertNotNull(preReports);
  Assert.assertNotNull(postReports);
  Assert.assertEquals(preReports.length, postReports.length);
  for (DatanodeStorageReport preReport : preReports) {
    String dnUuid = preReport.getDatanodeInfo().getDatanodeUuid();
    // Find the post-run report for the same datanode by UUID.
    for (DatanodeStorageReport postReport : postReports) {
      if (postReport.getDatanodeInfo().getDatanodeUuid().equals(dnUuid)) {
        Assert.assertEquals(getTotalPoolUsage(preReport),
            getTotalPoolUsage(postReport));
        // Fixed typo in the original message: "Comparision" -> "Comparison".
        LOG.info("Comparison of datanode pool usage pre/post balancer run. "
            + "PrePoolUsage: " + getTotalPoolUsage(preReport)
            + ", PostPoolUsage: " + getTotalPoolUsage(postReport));
        break;
      }
    }
  }
}
/**
 * Query the namenode for the number of storage directories (disks) reported
 * by each datanode.
 *
 * Requires privileges sufficient to call getDatanodeStorageReport; on
 * failure an empty map is returned and a warning is logged.
 *
 * @return map from datanode hostname to its number of storage directories;
 *         empty if the report could not be fetched
 */
public HashMap<String, Integer> getNumberOfDataDirsPerHost() {
  HashMap<String, Integer> disksPerHost = new HashMap<>();
  // try-with-resources: the original leaked the DFSClient (it suppressed the
  // "resource" warning instead of closing the client).
  try (DFSClient dfsClient =
      new DFSClient(NameNode.getAddress(getConf()), getConf())) {
    DatanodeStorageReport[] datanodeStorageReports =
        dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);
    for (DatanodeStorageReport report : datanodeStorageReports) {
      disksPerHost.put(report.getDatanodeInfo().getHostName(),
          report.getStorageReports().length);
    }
  } catch (IOException e) {
    // Log the cause as well; the original dropped the exception entirely.
    // Also fixes typos: "requieres higher privilegies".
    LOG.warn("Number of data directories (disks) per node could not be "
        + "collected (requires higher privileges).", e);
  }
  return disksPerHost;
}
/**
 * Fetch per-storage reports for datanodes of the given type from the
 * namenode, wrapping the RPC in a trace span.
 *
 * @param type which datanodes to report on
 * @return one report per selected datanode
 * @throws IOException if the client is closed or the RPC fails
 */
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  final TraceScope span = Trace.startSpan("datanodeStorageReport",
      traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    span.close();
  }
}
/**
 * Initialize storage policies and, for every datanode reported by the
 * dispatcher, register one source per movable storage type (unbounded) and
 * one target per type with free space remaining.
 */
void init() throws IOException {
  initStoragePolicies();
  final List<DatanodeStorageReport> reports = dispatcher.init();
  for (DatanodeStorageReport report : reports) {
    final DDatanode dn = dispatcher.newDatanode(report.getDatanodeInfo());
    for (StorageType storageType : StorageType.getMovableTypes()) {
      // Every node is a potential source for every movable type.
      final Source src = dn.addSource(storageType, Long.MAX_VALUE, dispatcher);
      // Only nodes with remaining space for this type become targets.
      final long remaining = getMaxRemaining(report, storageType);
      final StorageGroup tgt =
          remaining > 0L ? dn.addTarget(storageType, remaining) : null;
      storages.add(src, tgt);
    }
  }
}
/**
 * Return the largest remaining capacity among the report's storages of type
 * {@code t}, or 0 if the node has no storage of that type.
 */
private static long getMaxRemaining(DatanodeStorageReport report,
    StorageType t) {
  long best = 0L;
  for (StorageReport sr : report.getStorageReports()) {
    if (sr.getStorage().getStorageType() == t) {
      best = Math.max(best, sr.getRemaining());
    }
  }
  return best;
}
@Override // ClientProtocol public DatanodeStorageReport[] getDatanodeStorageReport( DatanodeReportType type) throws IOException { checkNNStartup(); final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type); return reports; }
/**
 * Sum the capacities of all storages of type {@code t} in the report.
 * Returns 0 if the node has no storage of that type.
 */
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
  long total = 0L;
  for (StorageReport sr : report.getStorageReports()) {
    if (sr.getStorage().getStorageType() == t) {
      total += sr.getCapacity();
    }
  }
  return total;
}
/**
 * Sum the remaining space of all storages of type {@code t} in the report.
 * Returns 0 if the node has no storage of that type.
 */
private static long getRemaining(DatanodeStorageReport report, StorageType t) {
  long total = 0L;
  for (StorageReport sr : report.getStorageReports()) {
    if (sr.getStorage().getStorageType() == t) {
      total += sr.getRemaining();
    }
  }
  return total;
}
/**
 * Accumulate this node's capacity and DFS-used space into the per-storage-
 * type totals (DFS-used balancing policy).
 */
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  for (StorageReport sr : r.getStorageReports()) {
    final StorageType storageType = sr.getStorage().getStorageType();
    totalCapacities.add(storageType, sr.getCapacity());
    totalUsedSpaces.add(storageType, sr.getDfsUsed());
  }
}
/**
 * Percent DFS-used utilization of this node's storages of type {@code t},
 * or {@code null} when the node has no capacity of that type (so callers
 * can distinguish "empty" from "absent").
 */
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  long totalCapacity = 0L;
  long totalDfsUsed = 0L;
  for (StorageReport sr : r.getStorageReports()) {
    if (sr.getStorage().getStorageType() == t) {
      totalCapacity += sr.getCapacity();
      totalDfsUsed += sr.getDfsUsed();
    }
  }
  if (totalCapacity == 0L) {
    return null;
  }
  return totalDfsUsed * 100.0 / totalCapacity;
}
/**
 * Accumulate this node's capacity and block-pool-used space into the
 * per-storage-type totals (block-pool balancing policy).
 */
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  for (StorageReport sr : r.getStorageReports()) {
    final StorageType storageType = sr.getStorage().getStorageType();
    totalCapacities.add(storageType, sr.getCapacity());
    totalUsedSpaces.add(storageType, sr.getBlockPoolUsed());
  }
}
/**
 * Percent block-pool utilization of this node's storages of type {@code t},
 * or {@code null} when the node has no capacity of that type.
 */
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  long totalCapacity = 0L;
  long totalBpUsed = 0L;
  for (StorageReport sr : r.getStorageReports()) {
    if (sr.getStorage().getStorageType() == t) {
      totalCapacity += sr.getCapacity();
      totalBpUsed += sr.getBlockPoolUsed();
    }
  }
  if (totalCapacity == 0L) {
    return null;
  }
  return totalBpUsed * 100.0 / totalCapacity;
}
/** Convert a {@link DatanodeStorageReport} to its protobuf message. */
public static DatanodeStorageReportProto convertDatanodeStorageReport(
    DatanodeStorageReport report) {
  final DatanodeStorageReportProto.Builder builder =
      DatanodeStorageReportProto.newBuilder();
  builder.setDatanodeInfo(convert(report.getDatanodeInfo()));
  builder.addAllStorageReports(
      convertStorageReports(report.getStorageReports()));
  return builder.build();
}
/** Convert an array of storage reports to a list of protobuf messages. */
public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
    DatanodeStorageReport[] reports) {
  final List<DatanodeStorageReportProto> protos =
      new ArrayList<DatanodeStorageReportProto>(reports.length);
  for (DatanodeStorageReport report : reports) {
    protos.add(convertDatanodeStorageReport(report));
  }
  return protos;
}
/** Convert a list of protobuf messages back to a storage-report array. */
public static DatanodeStorageReport[] convertDatanodeStorageReports(
    List<DatanodeStorageReportProto> protos) {
  final DatanodeStorageReport[] reports =
      new DatanodeStorageReport[protos.size()];
  int idx = 0;
  for (DatanodeStorageReportProto proto : protos) {
    reports[idx++] = convertDatanodeStorageReport(proto);
  }
  return reports;
}
/**
 * Client-side protocol translator: build the protobuf request, issue the
 * RPC, and convert the response back to {@link DatanodeStorageReport}s.
 */
@Override
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  final GetDatanodeStorageReportRequestProto request =
      GetDatanodeStorageReportRequestProto.newBuilder()
          .setType(PBHelper.convert(type))
          .build();
  try {
    final List<DatanodeStorageReportProto> response =
        rpcProxy.getDatanodeStorageReport(null, request)
            .getDatanodeStorageReportsList();
    return PBHelper.convertDatanodeStorageReports(response);
  } catch (ServiceException e) {
    // Unwrap the remote exception carried by the service-level wrapper.
    throw ProtobufHelper.getRemoteException(e);
  }
}
/**
 * Fetch per-storage reports for datanodes of the given type from the
 * namenode, tracing the RPC under the "datanodeStorageReport" span.
 *
 * @param type which datanodes to report on
 * @return one report per selected datanode
 * @throws IOException if the client is closed or the RPC fails
 */
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  final TraceScope scope = tracer.newScope("datanodeStorageReport");
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
/** Convert an array of storage reports to a list of protobuf messages. */
public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
    DatanodeStorageReport[] reports) {
  final List<DatanodeStorageReportProto> protos =
      new ArrayList<>(reports.length);
  for (int i = 0; i < reports.length; i++) {
    protos.add(convertDatanodeStorageReport(reports[i]));
  }
  return protos;
}
/**
 * Client-side protocol translator: build the protobuf request, issue the
 * RPC, and convert the response back to {@link DatanodeStorageReport}s.
 */
@Override
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  final GetDatanodeStorageReportRequestProto.Builder builder =
      GetDatanodeStorageReportRequestProto.newBuilder();
  builder.setType(PBHelperClient.convert(type));
  try {
    return PBHelperClient.convertDatanodeStorageReports(
        rpcProxy.getDatanodeStorageReport(null, builder.build())
            .getDatanodeStorageReportsList());
  } catch (ServiceException e) {
    // Unwrap the remote exception carried by the service-level wrapper.
    throw ProtobufHelper.getRemoteException(e);
  }
}