Java class org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl example source code

Project: hadoop    File: TestStorageMover.java
private void setVolumeFull(DataNode dn, StorageType type) {
  List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
  for (FsVolumeSpi v : volumes) {
    FsVolumeImpl volume = (FsVolumeImpl) v;
    if (volume.getStorageType() == type) {
      LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
          + volume.getStorageID());
      volume.setCapacityForTesting(0);
    }
  }
}
Project: hadoop    File: TestBlockScanner.java
@Before
public void before() {
  BlockScanner.Conf.allowUnitTestSettings = true;
  GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
Project: hadoop    File: TestBlockScanner.java
@Test(timeout=120000)
public void testNextSorted() throws Exception {
  List<String> arr = new LinkedList<String>();
  arr.add("1");
  arr.add("3");
  arr.add("5");
  arr.add("7");
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
  Assert.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
}
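For reference, the assertions above fully pin down the expected behaviour: given a sorted list, nextSorted returns the first element strictly greater than the key, treats a null or empty key as "before everything", and returns null when no larger element exists. Below is a minimal stand-alone sketch of that contract; it is an illustrative re-implementation, not the actual code in FsVolumeImpl.

import java.util.Arrays;
import java.util.List;

public class NextSortedSketch {
  // Return the first element of the sorted list strictly greater than key;
  // a null or empty key matches the first element, and null is returned
  // when no element is greater.
  static String nextSorted(List<String> sorted, String key) {
    for (String s : sorted) {
      if (key == null || key.isEmpty() || s.compareTo(key) > 0) {
        return s;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    List<String> arr = Arrays.asList("1", "3", "5", "7");
    System.out.println(nextSorted(arr, "2"));  // prints 3
    System.out.println(nextSorted(arr, null)); // prints 1
    System.out.println(nextSorted(arr, "9"));  // prints null
  }
}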
Project: hadoop    File: TestDataNodeHotSwapVolumes.java
/** Get the FsVolume on the given basePath */
private FsVolumeImpl getVolume(DataNode dn, File basePath) {
  for (FsVolumeSpi vol : dn.getFSDataset().getVolumes()) {
    if (vol.getBasePath().equals(basePath.getPath())) {
      return (FsVolumeImpl)vol;
    }
  }
  return null;
}
Project: hadoop    File: TestDataNodeHotSwapVolumes.java
/**
 * Verify that {@link DataNode#checkDiskErrors()} removes all metadata in
 * DataNode upon a volume failure. Thus we can run reconfig on the same
 * configuration to reload the new volume on the same directory as the failed one.
 */
@Test(timeout=60000)
public void testDirectlyReloadAfterCheckDiskError()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  startDFSCluster(1, 2);
  createFile(new Path("/test"), 32, (short)2);

  DataNode dn = cluster.getDataNodes().get(0);
  final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
  File dirToFail = new File(cluster.getDataDirectory(), "data1");

  FsVolumeImpl failedVolume = getVolume(dn, dirToFail);
  assertTrue("No FsVolume was found for " + dirToFail,
      failedVolume != null);
  long used = failedVolume.getDfsUsed();

  DataNodeTestUtils.injectDataDirFailure(dirToFail);
  // Trigger a disk check and wait for the DataNode to detect the failure.
  long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
  dn.checkDiskErrorAsync();
  while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
    Thread.sleep(100);
  }

  createFile(new Path("/test1"), 32, (short)2);
  assertEquals(used, failedVolume.getDfsUsed());

  DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);

  createFile(new Path("/test2"), 32, (short)2);
  FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
  assertTrue(restoredVolume != null);
  assertTrue(restoredVolume != failedVolume);
  // More data has been written to this volume.
  assertTrue(restoredVolume.getDfsUsed() > used);
}
Project: aliyun-oss-hadoop-fs    File: TestStorageMover.java
private void setVolumeFull(DataNode dn, StorageType type) {
  try (FsDatasetSpi.FsVolumeReferences refs = dn.getFSDataset()
      .getFsVolumeReferences()) {
    for (FsVolumeSpi fvs : refs) {
      FsVolumeImpl volume = (FsVolumeImpl) fvs;
      if (volume.getStorageType() == type) {
        LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
            + volume.getStorageID());
        volume.setCapacityForTesting(0);
      }
    }
  } catch (IOException e) {
    LOG.error("Unexpected exception by closing FsVolumeReference", e);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestBlockScanner.java
@Before
public void before() {
  BlockScanner.Conf.allowUnitTestSettings = true;
  GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
Project: aliyun-oss-hadoop-fs    File: TestBlockScanner.java
@Test(timeout=120000)
public void testNextSorted() throws Exception {
  List<String> arr = new LinkedList<String>();
  arr.add("1");
  arr.add("3");
  arr.add("5");
  arr.add("7");
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
  Assert.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
}
Project: aliyun-oss-hadoop-fs    File: TestDataNodeHotSwapVolumes.java
/** Get the FsVolume on the given basePath */
private FsVolumeImpl getVolume(DataNode dn, File basePath)
    throws IOException {
  try (FsDatasetSpi.FsVolumeReferences volumes =
    dn.getFSDataset().getFsVolumeReferences()) {
    for (FsVolumeSpi vol : volumes) {
      if (vol.getBasePath().equals(basePath.getPath())) {
        return (FsVolumeImpl) vol;
      }
    }
  }
  return null;
}
Project: big-c    File: TestStorageMover.java
private void setVolumeFull(DataNode dn, StorageType type) {
  List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
  for (FsVolumeSpi v : volumes) {
    FsVolumeImpl volume = (FsVolumeImpl) v;
    if (volume.getStorageType() == type) {
      LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
          + volume.getStorageID());
      volume.setCapacityForTesting(0);
    }
  }
}
Project: big-c    File: TestBlockScanner.java
@Before
public void before() {
  BlockScanner.Conf.allowUnitTestSettings = true;
  GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
Project: big-c    File: TestBlockScanner.java
@Test(timeout=120000)
public void testNextSorted() throws Exception {
  List<String> arr = new LinkedList<String>();
  arr.add("1");
  arr.add("3");
  arr.add("5");
  arr.add("7");
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
  Assert.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
}
Project: big-c    File: TestDataNodeHotSwapVolumes.java
/** Get the FsVolume on the given basePath */
private FsVolumeImpl getVolume(DataNode dn, File basePath) {
  for (FsVolumeSpi vol : dn.getFSDataset().getVolumes()) {
    if (vol.getBasePath().equals(basePath.getPath())) {
      return (FsVolumeImpl)vol;
    }
  }
  return null;
}
Project: big-c    File: TestDataNodeHotSwapVolumes.java
/**
 * Verify that {@link DataNode#checkDiskErrors()} removes all metadata in
 * DataNode upon a volume failure. Thus we can run reconfig on the same
 * configuration to reload the new volume on the same directory as the failed one.
 */
@Test(timeout=60000)
public void testDirectlyReloadAfterCheckDiskError()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  startDFSCluster(1, 2);
  createFile(new Path("/test"), 32, (short)2);

  DataNode dn = cluster.getDataNodes().get(0);
  final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
  File dirToFail = new File(cluster.getDataDirectory(), "data1");

  FsVolumeImpl failedVolume = getVolume(dn, dirToFail);
  assertTrue("No FsVolume was found for " + dirToFail,
      failedVolume != null);
  long used = failedVolume.getDfsUsed();

  DataNodeTestUtils.injectDataDirFailure(dirToFail);
  // Trigger a disk check and wait for the DataNode to detect the failure.
  long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
  dn.checkDiskErrorAsync();
  while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
    Thread.sleep(100);
  }

  createFile(new Path("/test1"), 32, (short)2);
  assertEquals(used, failedVolume.getDfsUsed());

  DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);

  createFile(new Path("/test2"), 32, (short)2);
  FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
  assertTrue(restoredVolume != null);
  assertTrue(restoredVolume != failedVolume);
  // More data has been written to this volume.
  assertTrue(restoredVolume.getDfsUsed() > used);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestStorageMover.java
private void setVolumeFull(DataNode dn, StorageType type) {
  List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
  for (FsVolumeSpi v : volumes) {
    FsVolumeImpl volume = (FsVolumeImpl) v;
    if (volume.getStorageType() == type) {
      LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
          + volume.getStorageID());
      volume.setCapacityForTesting(0);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockScanner.java
@Before
public void before() {
  BlockScanner.Conf.allowUnitTestSettings = true;
  GenericTestUtils.setLogLevel(BlockScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(VolumeScanner.LOG, Level.ALL);
  GenericTestUtils.setLogLevel(FsVolumeImpl.LOG, Level.ALL);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestBlockScanner.java
@Test(timeout=120000)
public void testNextSorted() throws Exception {
  List<String> arr = new LinkedList<String>();
  arr.add("1");
  arr.add("3");
  arr.add("5");
  arr.add("7");
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "2"));
  Assert.assertEquals("3", FsVolumeImpl.nextSorted(arr, "1"));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, ""));
  Assert.assertEquals("1", FsVolumeImpl.nextSorted(arr, null));
  Assert.assertEquals(null, FsVolumeImpl.nextSorted(arr, "9"));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataNodeHotSwapVolumes.java
/** Get the FsVolume on the given basePath */
private FsVolumeImpl getVolume(DataNode dn, File basePath) {
  for (FsVolumeSpi vol : dn.getFSDataset().getVolumes()) {
    if (vol.getBasePath().equals(basePath.getPath())) {
      return (FsVolumeImpl)vol;
    }
  }
  return null;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDataNodeHotSwapVolumes.java
/**
 * Verify that {@link DataNode#checkDiskErrors()} removes all metadata in
 * DataNode upon a volume failure. Thus we can run reconfig on the same
 * configuration to reload the new volume on the same directory as the failed one.
 */
@Test(timeout=60000)
public void testDirectlyReloadAfterCheckDiskError()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  startDFSCluster(1, 2);
  createFile(new Path("/test"), 32, (short)2);

  DataNode dn = cluster.getDataNodes().get(0);
  final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
  File dirToFail = new File(cluster.getDataDirectory(), "data1");

  FsVolumeImpl failedVolume = getVolume(dn, dirToFail);
  assertTrue("No FsVolume was found for " + dirToFail,
      failedVolume != null);
  long used = failedVolume.getDfsUsed();

  DataNodeTestUtils.injectDataDirFailure(dirToFail);
  // Trigger a disk check and wait for the DataNode to detect the failure.
  long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
  dn.checkDiskErrorAsync();
  while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
    Thread.sleep(100);
  }

  createFile(new Path("/test1"), 32, (short)2);
  assertEquals(used, failedVolume.getDfsUsed());

  DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);

  createFile(new Path("/test2"), 32, (short)2);
  FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
  assertTrue(restoredVolume != null);
  assertTrue(restoredVolume != failedVolume);
  // More data has been written to this volume.
  assertTrue(restoredVolume.getDfsUsed() > used);
}
Project: PDHC    File: FsDatasetAsyncDiskService.java
public void submitSyncFileRangeRequest(FsVolumeImpl volume,
    final FileDescriptor fd, final long offset, final long nbytes,
    final int flags) {
  execute(volume.getCurrentDir(), new Runnable() {
    @Override
    public void run() {
      try {
        NativeIO.POSIX.syncFileRangeIfPossible(fd, offset, nbytes, flags);
      } catch (NativeIOException e) {
        LOG.warn("sync_file_range error", e);
      }
    }
  });
}
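The method above queues a sync_file_range call on the volume's executor so the caller never blocks on the kernel writeback. A direct stand-alone sketch of the underlying call it wraps is shown below; the file name and the choice of the SYNC_FILE_RANGE_WRITE flag are illustrative assumptions, and the "IfPossible" variant is expected to degrade to a no-op on platforms without native support.

import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.nativeio.NativeIO;

public class SyncFileRangeExample {
  public static void main(String[] args) throws Exception {
    byte[] payload = "replica data".getBytes(StandardCharsets.UTF_8);
    // Write some bytes, then ask the kernel to start writing that byte range
    // back to disk without waiting for completion.
    try (FileOutputStream out = new FileOutputStream("blk_example")) {
      out.write(payload);
      out.flush();
      FileDescriptor fd = out.getFD();
      NativeIO.POSIX.syncFileRangeIfPossible(
          fd, 0, payload.length, NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
    }
  }
}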
Project: PDHC    File: FsDatasetAsyncDiskService.java
/**
 * Delete the block file and meta file from the disk asynchronously, adjust
 * dfsUsed statistics accordingly.
 */
void deleteAsync(FsVolumeImpl volume, File blockFile, File metaFile,
    ExtendedBlock block, String trashDirectory) {
  LOG.info("Scheduling " + block.getLocalBlock()
      + " file " + blockFile + " for deletion");
  ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
      volume, blockFile, metaFile, block, trashDirectory);
  execute(volume.getCurrentDir(), deletionTask);
}
Project: PDHC    File: FsDatasetAsyncDiskService.java
ReplicaFileDeleteTask(FsVolumeImpl volume, File blockFile,
    File metaFile, ExtendedBlock block, String trashDirectory) {
  this.volume = volume;
  this.blockFile = blockFile;
  this.metaFile = metaFile;
  this.block = block;
  this.trashDirectory = trashDirectory;
}
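The constructor above only captures the files and bookkeeping references; the actual work happens later in the task's run(). A deliberately simplified stand-alone sketch of what such a deletion task can do is given below: it deletes the block and meta files and reports the freed bytes, while omitting the trash handling and dfsUsed accounting of the real Hadoop task.

import java.io.File;

class SimpleReplicaDeleteTask implements Runnable {
  private final File blockFile;
  private final File metaFile;

  SimpleReplicaDeleteTask(File blockFile, File metaFile) {
    this.blockFile = blockFile;
    this.metaFile = metaFile;
  }

  @Override
  public void run() {
    // Capture sizes before deleting so the freed space can be reported.
    long freed = blockFile.length() + metaFile.length();
    boolean blockDeleted = blockFile.delete();
    boolean metaDeleted = metaFile.delete();
    if (!blockDeleted || !metaDeleted) {
      System.err.println("Failed to delete " + blockFile + " or " + metaFile);
      return;
    }
    // The real task would also subtract 'freed' from the owning volume's
    // dfsUsed accounting at this point.
    System.out.println("Deleted replica files, freed " + freed + " bytes");
  }
}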
Project: FlexMap    File: TestStorageMover.java
private void setVolumeFull(DataNode dn, StorageType type) {
  List<? extends FsVolumeSpi> volumes = dn.getFSDataset().getVolumes();
  for (FsVolumeSpi v : volumes) {
    FsVolumeImpl volume = (FsVolumeImpl) v;
    if (volume.getStorageType() == type) {
      LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]"
          + volume.getStorageID());
      volume.setCapacityForTesting(0);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDataNodeHotSwapVolumes.java
/**
 * Verify that {@link DataNode#checkDiskErrors()} removes all metadata in
 * DataNode upon a volume failure. Thus we can run reconfig on the same
 * configuration to reload the new volume on the same directory as the failed one.
 */
@Test(timeout=60000)
public void testDirectlyReloadAfterCheckDiskError()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
  // volume failures which is currently not supported on Windows.
  assumeTrue(!Path.WINDOWS);

  startDFSCluster(1, 2);
  createFile(new Path("/test"), 32, (short)2);

  DataNode dn = cluster.getDataNodes().get(0);
  final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
  File dirToFail = new File(cluster.getDataDirectory(), "data1");

  FsVolumeImpl failedVolume = getVolume(dn, dirToFail);
  assertTrue("No FsVolume was found for " + dirToFail,
      failedVolume != null);
  long used = failedVolume.getDfsUsed();

  DataNodeTestUtils.injectDataDirFailure(dirToFail);
  // Trigger a disk check and wait for the DataNode to detect the failure.
  long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
  dn.checkDiskErrorAsync();
  while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
    Thread.sleep(100);
  }

  createFile(new Path("/test1"), 32, (short)2);
  assertEquals(used, failedVolume.getDfsUsed());

  DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);

  createFile(new Path("/test2"), 32, (short)2);
  FsVolumeImpl restoredVolume = getVolume(dn, dirToFail);
  assertTrue(restoredVolume != null);
  assertTrue(restoredVolume != failedVolume);
  // More data has been written to this volume.
  assertTrue(restoredVolume.getDfsUsed() > used);
}
Project: FlexMap    File: SimulatedFSDataset.java
@Override
public void onCompleteLazyPersist(String bpId, long blockId,
    long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
  throw new UnsupportedOperationException();
}
Project: FlexMap    File: FsDatasetSpi.java
/**
 * Callback from RamDiskAsyncLazyPersistService upon async lazy persist
 * task end.
 */
public void onCompleteLazyPersist(String bpId, long blockId,
    long creationTime, File[] savedFiles, FsVolumeImpl targetVolume);
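The declaration above is only the callback contract. Purely for illustration, a stand-alone class with a matching method is sketched below; it is a hypothetical logging handler, not the real FsDatasetImpl implementation, and the class name is assumed.

import java.io.File;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;

class LoggingLazyPersistHandler {
  // Matches the callback signature declared above; only logs the completion.
  public void onCompleteLazyPersist(String bpId, long blockId,
      long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
    StringBuilder paths = new StringBuilder();
    for (File f : savedFiles) {
      paths.append(f.getPath()).append(' ');
    }
    System.out.println("Lazy persist finished for block " + blockId
        + " in pool " + bpId + " (created " + creationTime + "), files: "
        + paths.toString().trim());
  }
}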