@Before
public void setUp() throws IOException {
  final DataNode datanode = Mockito.mock(DataNode.class);
  storage = Mockito.mock(DataStorage.class);
  scanner = Mockito.mock(DataBlockScanner.class);
  this.conf = new Configuration();
  final DNConf dnConf = new DNConf(conf);

  when(datanode.getConf()).thenReturn(conf);
  when(datanode.getDnConf()).thenReturn(dnConf);
  when(datanode.getBlockScanner()).thenReturn(scanner);

  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
  for (String bpid : BLOCK_POOL_IDS) {
    dataset.addBlockPool(bpid, conf);
  }

  assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
  assertEquals(0, dataset.getNumFailedVolumes());
}

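The setUp() above calls a createStorageDirs helper that is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming Mockito static imports and a hypothetical BASE_DIR test constant (both assumptions, not taken from the original):

// Hypothetical sketch only: wires numDirs mocked storage directories into the
// mocked DataStorage and points dfs.datanode.data.dir at matching local paths.
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs = new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR, "data" + i);   // BASE_DIR: assumed test constant
    loc.mkdirs();
    dirStrings.add(loc.toString());
    dirs.add(new Storage.StorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      StringUtils.join(",", dirStrings));
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
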
@Override
public String next() {
  String curLine = line;
  try {
    readNext();
  } catch (IOException e) {
    DataBlockScanner.LOG.warn("Failed to read next line.", e);
  }
  return curLine;
}

/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}

@Override
public String next() {
  String curLine = line;
  try {
    lastReadFile = file;
    readNext();
  } catch (IOException e) {
    DataBlockScanner.LOG.warn("Failed to read next line.", e);
  }
  return curLine;
}

/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (String n : lognames) {
    setLevel2OFF(LogFactory.getLog(n));
  }

  setLevel2OFF(LogFactory.getLog(UserGroupInformation.class));
  setLevel2OFF(LogFactory.getLog(BlockManager.class));
  setLevel2OFF(LogFactory.getLog(FSNamesystem.class));
  setLevel2OFF(LogFactory.getLog(DirectoryScanner.class));
  setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));

  setLevel2OFF(DataBlockScanner.LOG);
  setLevel2OFF(HttpServer2.LOG);
  setLevel2OFF(DataNode.LOG);
  setLevel2OFF(BlockPoolSliceStorage.LOG);
  setLevel2OFF(LeaseManager.LOG);
  setLevel2OFF(NameNode.stateChangeLog);
  setLevel2OFF(NameNode.blockStateChangeLog);
  setLevel2OFF(DFSClient.LOG);
  setLevel2OFF(Server.LOG);
}

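Both disableLogs() variants above delegate to a setLevel2OFF helper that is not shown in this excerpt. A plausible sketch, assuming commons-logging Log instances backed by log4j (the Log4JLogger cast is an assumption about the logging implementation, not confirmed by the excerpt):

// Hypothetical sketch: silence a commons-logging Log that wraps a log4j logger.
static void setLevel2OFF(Object log) {
  ((Log4JLogger) log).getLogger().setLevel(Level.OFF);
}
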
public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
    DataBlockScanner scanner) throws IOException {
  BlockMetaDataInfo metainfo = idp.getBlockMetaDataInfo(b);
  assertEquals(b.getBlockId(), metainfo.getBlockId());
  assertEquals(b.getNumBytes(), metainfo.getNumBytes());
  if (scanner != null) {
    assertEquals(scanner.getLastScanTime(b), metainfo.getLastScanTime());
  }
}

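A hedged usage sketch for checkMetaInfo, assuming a Block b, an InterDatanodeProtocol proxy idp, and a DataNode datanode are already set up elsewhere in the test (all three are placeholders here); passing null for the scanner skips the last-scan-time assertion:

// Hypothetical call sites: verify block metadata with and without a scanner.
checkMetaInfo(b, idp, datanode.getBlockScanner()); // also compares last scan times
checkMetaInfo(b, idp, null);                       // skips the scan-time check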