Java class org.apache.hadoop.hdfs.server.protocol.DatanodeStorage: example source code

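The snippets that follow are collected from several Hadoop-derived projects and show how DatanodeStorage objects are created, registered, and inspected. As a quick orientation, here is a minimal, hypothetical sketch (it is not taken from any of the projects below; the class name DatanodeStorageExample is invented, and the import path for StorageType can vary between Hadoop versions) that exercises only the constructors and accessors that appear in the real snippets:

import org.apache.hadoop.fs.StorageType;                       // location may differ in older Hadoop releases
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class DatanodeStorageExample {                          // hypothetical class, for illustration only
  public static void main(String[] args) {
    // Generate a fresh GUID-based storage ID, as DataStorage.format() does below.
    String storageId = DatanodeStorage.generateUuid();

    // Single-argument constructor: only the storage ID is supplied.
    DatanodeStorage byIdOnly = new DatanodeStorage(storageId);

    // Full constructor: storage ID, state, and storage type are explicit.
    DatanodeStorage explicit = new DatanodeStorage(
        storageId, DatanodeStorage.State.NORMAL, StorageType.DEFAULT);

    // Accessors used throughout the snippets below.
    System.out.println(byIdOnly.getStorageID());
    System.out.println(explicit.getState());       // NORMAL
    System.out.println(explicit.getStorageType()); // DEFAULT

    // GUID-based IDs, such as those from generateUuid(), are considered valid.
    System.out.println(DatanodeStorage.isValidStorageId(storageId));
  }
}
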
Project: hadoop    File: TestDFSShell.java
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<File>();
  List<DataNode> datanodes = cluster.getDataNodes();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
  for(int i = 0; i < blocks.size(); i++) {
    DataNode dn = datanodes.get(i);
    Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
    for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
      for(Block b : e.getValue()) {
        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
      }
    }        
  }
  return files;
}
Project: hadoop    File: DatanodeDescriptor.java
DatanodeStorageInfo updateStorage(DatanodeStorage s) {
  synchronized (storageMap) {
    DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
    if (storage == null) {
      LOG.info("Adding new storage ID " + s.getStorageID() +
               " for DN " + getXferAddr());
      storage = new DatanodeStorageInfo(this, s);
      storageMap.put(s.getStorageID(), storage);
    } else if (storage.getState() != s.getState() ||
               storage.getStorageType() != s.getStorageType()) {
      // For backwards compatibility, make sure that the type and
      // state are updated. Some reports from older datanodes do
      // not include these fields so we may have assumed defaults.
      storage.updateFromStorage(s);
      storageMap.put(storage.getStorageID(), storage);
    }
    return storage;
  }
}
Project: hadoop    File: BPServiceActor.java
/**
 * @return pending incremental block report for given {@code storage}
 */
private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
    DatanodeStorage storage) {
  PerStoragePendingIncrementalBR mapForStorage =
      pendingIncrementalBRperStorage.get(storage);

  if (mapForStorage == null) {
    // This is the first time we are adding incremental BR state for
    // this storage so create a new map. This is required once per
    // storage, per service actor.
    mapForStorage = new PerStoragePendingIncrementalBR();
    pendingIncrementalBRperStorage.put(storage, mapForStorage);
  }

  return mapForStorage;
}
Project: hadoop    File: DataStorage.java
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
Project: hadoop    File: TestDatanodeStartupFixesLegacyStorageIDs.java
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
Project: hadoop    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
Project: hadoop    File: TestPendingDataNodeMessages.java
@Test
public void testQueues() {
  DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
  DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
  msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED);
  msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED);

  assertEquals(2, msgs.count());

  // Nothing queued yet for block 2
  assertNull(msgs.takeBlockQueue(block2Gs1));
  assertEquals(2, msgs.count());

  Queue<ReportedBlockInfo> q =
    msgs.takeBlockQueue(block1Gs2DifferentInstance);
  assertEquals(
      "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
      "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
      Joiner.on(",").join(q));
  assertEquals(0, msgs.count());

  // Should be null if we pull again
  assertNull(msgs.takeBlockQueue(block1Gs1));
  assertEquals(0, msgs.count());
}
Project: hadoop    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Project: hadoop    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
Project: aliyun-oss-hadoop-fs    File: DatanodeDescriptor.java
DatanodeStorageInfo updateStorage(DatanodeStorage s) {
  synchronized (storageMap) {
    DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
    if (storage == null) {
      LOG.info("Adding new storage ID {} for DN {}", s.getStorageID(),
          getXferAddr());
      storage = new DatanodeStorageInfo(this, s);
      storageMap.put(s.getStorageID(), storage);
    } else if (storage.getState() != s.getState() ||
               storage.getStorageType() != s.getStorageType()) {
      // For backwards compatibility, make sure that the type and
      // state are updated. Some reports from older datanodes do
      // not include these fields so we may have assumed defaults.
      storage.updateFromStorage(s);
      storageMap.put(storage.getStorageID(), storage);
    }
    return storage;
  }
}
Project: aliyun-oss-hadoop-fs    File: DatanodeStats.java
synchronized void add(final DatanodeDescriptor node) {
  xceiverCount += node.getXceiverCount();
  if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
    capacityUsed += node.getDfsUsed();
    blockPoolUsed += node.getBlockPoolUsed();
    nodesInService++;
    nodesInServiceXceiverCount += node.getXceiverCount();
    capacityTotal += node.getCapacity();
    capacityRemaining += node.getRemaining();
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  } else if (!node.isDecommissioned()) {
    cacheCapacity += node.getCacheCapacity();
    cacheUsed += node.getCacheUsed();
  }
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.addStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.addNode(storageType, node);
  }
}
Project: aliyun-oss-hadoop-fs    File: DatanodeStats.java
synchronized void subtract(final DatanodeDescriptor node) {
  xceiverCount -= node.getXceiverCount();
  if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
    capacityUsed -= node.getDfsUsed();
    blockPoolUsed -= node.getBlockPoolUsed();
    nodesInService--;
    nodesInServiceXceiverCount -= node.getXceiverCount();
    capacityTotal -= node.getCapacity();
    capacityRemaining -= node.getRemaining();
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  } else if (!node.isDecommissioned()) {
    cacheCapacity -= node.getCacheCapacity();
    cacheUsed -= node.getCacheUsed();
  }
  Set<StorageType> storageTypes = new HashSet<>();
  for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      statsMap.subtractStorage(storageInfo, node);
      storageTypes.add(storageInfo.getStorageType());
    }
  }
  for (StorageType storageType : storageTypes) {
    statsMap.subtractNode(storageType, node);
  }
}
Project: aliyun-oss-hadoop-fs    File: BPServiceActor.java
/**
 * @return pending incremental block report for given {@code storage}
 */
private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
    DatanodeStorage storage) {
  PerStoragePendingIncrementalBR mapForStorage =
      pendingIncrementalBRperStorage.get(storage);

  if (mapForStorage == null) {
    // This is the first time we are adding incremental BR state for
    // this storage so create a new map. This is required once per
    // storage, per service actor.
    mapForStorage = new PerStoragePendingIncrementalBR();
    pendingIncrementalBRperStorage.put(storage, mapForStorage);
  }

  return mapForStorage;
}
Project: aliyun-oss-hadoop-fs    File: DataStorage.java
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
Project: aliyun-oss-hadoop-fs    File: TestDatanodeStartupFixesLegacyStorageIDs.java
/**
 * Perform an upgrade using the test image corresponding to
 * testCaseName.
 *
 * @param testCaseName
 * @param expectedStorageId if null, then the upgrade generates a new
 *                          unique storage ID.
 * @throws IOException
 */
private static void runLayoutUpgradeTest(final String testCaseName,
                                         final String expectedStorageId)
    throws IOException {
  TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
  upgrade.unpackStorage(testCaseName + ".tgz", testCaseName + ".txt");
  Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
  initStorageDirs(conf, testCaseName);
  upgradeAndVerify(upgrade, conf, new ClusterVerifier() {
    @Override
    public void verifyClusterPostUpgrade(MiniDFSCluster cluster) throws IOException {
      // Verify that a GUID-based storage ID was generated.
      final String bpid = cluster.getNamesystem().getBlockPoolId();
      StorageReport[] reports =
          cluster.getDataNodes().get(0).getFSDataset().getStorageReports(bpid);
      assertThat(reports.length, is(1));
      final String storageID = reports[0].getStorage().getStorageID();
      assertTrue(DatanodeStorage.isValidStorageId(storageID));

      if (expectedStorageId != null) {
        assertThat(storageID, is(expectedStorageId));
      }
    }
  });
}
Project: aliyun-oss-hadoop-fs    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
          new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
Project: aliyun-oss-hadoop-fs    File: TestReplicationPolicy.java
@Override
DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  // create an extra storage for dn5.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);
  return DFSTestUtil.toDatanodeDescriptor(storages);
}
Project: big-c    File: TestPendingDataNodeMessages.java
@Test
public void testQueues() {
  DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
  DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
  msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED);
  msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED);

  assertEquals(2, msgs.count());

  // Nothing queued yet for block 2
  assertNull(msgs.takeBlockQueue(block2Gs1));
  assertEquals(2, msgs.count());

  Queue<ReportedBlockInfo> q =
    msgs.takeBlockQueue(block1Gs2DifferentInstance);
  assertEquals(
      "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
      "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
      Joiner.on(",").join(q));
  assertEquals(0, msgs.count());

  // Should be null if we pull again
  assertNull(msgs.takeBlockQueue(block1Gs1));
  assertEquals(0, msgs.count());
}
Project: big-c    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
Project: aliyun-oss-hadoop-fs    File: TestBlockManager.java
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.setAlive(true);

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
Project: aliyun-oss-hadoop-fs    File: TestBlockManager.java
@Test
public void testUseDelHint() {
  DatanodeStorageInfo delHint = new DatanodeStorageInfo(
      DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("id"));
  List<DatanodeStorageInfo> moreThan1Racks = Arrays.asList(delHint);
  List<StorageType> excessTypes = new ArrayList<>();
  BlockPlacementPolicyDefault policyDefault =
      (BlockPlacementPolicyDefault) bm.getBlockPlacementPolicy();
  excessTypes.add(StorageType.DEFAULT);
  Assert.assertTrue(policyDefault.useDelHint(delHint, null, moreThan1Racks,
      null, excessTypes));
  excessTypes.remove(0);
  excessTypes.add(StorageType.SSD);
  Assert.assertFalse(policyDefault.useDelHint(delHint, null, moreThan1Racks,
      null, excessTypes));
}
Project: aliyun-oss-hadoop-fs    File: TestBlockInfo.java
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
Project: aliyun-oss-hadoop-fs    File: TestRecoverStripedFile.java
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed()
    throws Exception {
  DataNode dataNode = cluster.dataNodes.get(0).datanode;

  // Pack invalid (dummy) parameters into ecTasks. Regardless of the parameters,
  // every task submission to the thread pool should succeed, so that an exception
  // in one task does not prevent the remaining tasks in the list from being processed.
  int size = cluster.dataNodes.size();
  byte[] liveIndices = new byte[size];
  DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
  DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil
      .newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(),
          new DatanodeStorage("s01"));
  DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] {
      targetDnInfos_1 };

  BlockECRecoveryInfo invalidECInfo = new BlockECRecoveryInfo(
      new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices,
      ErasureCodingPolicyManager.getSystemDefaultPolicy());
  List<BlockECRecoveryInfo> ecTasks = new ArrayList<BlockECRecoveryInfo>();
  ecTasks.add(invalidECInfo);
  dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
Project: big-c    File: DatanodeDescriptor.java
DatanodeStorageInfo updateStorage(DatanodeStorage s) {
  synchronized (storageMap) {
    DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
    if (storage == null) {
      LOG.info("Adding new storage ID " + s.getStorageID() +
               " for DN " + getXferAddr());
      storage = new DatanodeStorageInfo(this, s);
      storageMap.put(s.getStorageID(), storage);
    } else if (storage.getState() != s.getState() ||
               storage.getStorageType() != s.getStorageType()) {
      // For backwards compatibility, make sure that the type and
      // state are updated. Some reports from older datanodes do
      // not include these fields so we may have assumed defaults.
      storage.updateFromStorage(s);
      storageMap.put(storage.getStorageID(), storage);
    }
    return storage;
  }
}
Project: big-c    File: BPServiceActor.java
/**
 * @return pending incremental block report for given {@code storage}
 */
private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
    DatanodeStorage storage) {
  PerStoragePendingIncrementalBR mapForStorage =
      pendingIncrementalBRperStorage.get(storage);

  if (mapForStorage == null) {
    // This is the first time we are adding incremental BR state for
    // this storage so create a new map. This is required once per
    // storage, per service actor.
    mapForStorage = new PerStoragePendingIncrementalBR();
    pendingIncrementalBRperStorage.put(storage, mapForStorage);
  }

  return mapForStorage;
}
Project: big-c    File: DataStorage.java
void format(StorageDirectory sd, NamespaceInfo nsInfo,
            String datanodeUuid) throws IOException {
  sd.clearDirectory(); // create directory
  this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
  this.clusterID = nsInfo.getClusterID();
  this.namespaceID = nsInfo.getNamespaceID();
  this.cTime = 0;
  setDatanodeUuid(datanodeUuid);

  if (sd.getStorageUuid() == null) {
    // Assign a new Storage UUID.
    sd.setStorageUuid(DatanodeStorage.generateUuid());
  }

  writeProperties(sd);
}
Project: big-c    File: NNThroughputBenchmark.java
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
Project: hadoop    File: DatanodeDescriptor.java
private void updateFailedStorage(
    Set<DatanodeStorageInfo> failedStorageInfos) {
  for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
    if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
      LOG.info(storageInfo + " failed.");
      storageInfo.setState(DatanodeStorage.State.FAILED);
    }
  }
}
Project: hadoop    File: BlocksMap.java
/**
 * Searches for the block in the BlocksMap and 
 * returns {@link Iterable} of the storages the block belongs to
 * <i>that are of the given {@link DatanodeStorage.State state}</i>.
 * 
 * @param state DatanodeStorage state by which to filter the returned Iterable
 */
Iterable<DatanodeStorageInfo> getStorages(Block b, final DatanodeStorage.State state) {
  return Iterables.filter(getStorages(blocks.get(b)), new Predicate<DatanodeStorageInfo>() {
    @Override
    public boolean apply(DatanodeStorageInfo storage) {
      return storage.getState() == state;
    }
  });
}
Project: hadoop    File: BPServiceActor.java
/**
 * Add a blockInfo for notification to NameNode. If another entry
 * exists for the same block it is removed.
 *
 * Caller must synchronize access using pendingIncrementalBRperStorage.
 */
void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
    DatanodeStorage storage) {
  // Make sure another entry for the same block is first removed.
  // There may only be one such entry.
  for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
        pendingIncrementalBRperStorage.entrySet()) {
    if (entry.getValue().removeBlockInfo(bInfo)) {
      break;
    }
  }
  getIncrementalBRMapForStorage(storage).putBlockInfo(bInfo);
}
Project: hadoop    File: FsDatasetImpl.java
private void addVolume(Collection<StorageLocation> dataLocations,
    Storage.StorageDirectory sd) throws IOException {
  final File dir = sd.getCurrentDir();
  final StorageType storageType =
      getStorageTypeFromLocations(dataLocations, sd.getRoot());

  // If an IOException is raised from FsVolumeImpl() or getVolumeMap(), nothing
  // needs to be rolled back to keep the various data structures, e.g.,
  // storageMap and asyncDiskService, consistent.
  FsVolumeImpl fsVolume = new FsVolumeImpl(
      this, sd.getStorageUuid(), dir, this.conf, storageType);
  FsVolumeReference ref = fsVolume.obtainReference();
  ReplicaMap tempVolumeMap = new ReplicaMap(this);
  fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);

  synchronized (this) {
    volumeMap.addAll(tempVolumeMap);
    storageMap.put(sd.getStorageUuid(),
        new DatanodeStorage(sd.getStorageUuid(),
            DatanodeStorage.State.NORMAL,
            storageType));
    asyncDiskService.addVolume(sd.getCurrentDir());
    volumes.addVolume(ref);
  }

  LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
Project: hadoop    File: DataStorage.java
/** Create an ID for this storage.
 * @return true if a new storage ID was generated.
 */
public synchronized boolean createStorageID(
    StorageDirectory sd, boolean regenerateStorageIds) {
  final String oldStorageID = sd.getStorageUuid();
  if (oldStorageID == null || regenerateStorageIds) {
    sd.setStorageUuid(DatanodeStorage.generateUuid());
    LOG.info("Generated new storageID " + sd.getStorageUuid() +
        " for directory " + sd.getRoot() +
        (oldStorageID == null ? "" : (" to replace " + oldStorageID)));
    return true;
  }
  return false;
}
Project: hadoop    File: PBHelper.java
private static State convertState(StorageState state) {
  switch(state) {
  case READ_ONLY_SHARED:
    return DatanodeStorage.State.READ_ONLY_SHARED;
  case NORMAL:
  default:
    return DatanodeStorage.State.NORMAL;
  }
}
Project: hadoop    File: PBHelper.java
public static StorageReport convert(StorageReportProto p) {
  return new StorageReport(
      p.hasStorage() ?
          convert(p.getStorage()) :
          new DatanodeStorage(p.getStorageUuid()),
      p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
      p.getBlockPoolUsed());
}
Project: hadoop    File: DFSTestUtil.java
public static DatanodeStorageInfo createDatanodeStorageInfo(
    String storageID, String ip, String rack, String hostname,
    StorageType type) {
  final DatanodeStorage storage = new DatanodeStorage(storageID,
      DatanodeStorage.State.NORMAL, type);
  final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(
      ip, rack, storage, hostname);
  return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
}
Project: hadoop    File: MiniDFSCluster.java
/**
 * 
 * @param dataNodeIndex - data node whose block report is desired; the index is the same as for getDataNodes()
 * @return the block report for the specified data node
 */
public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
  if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  return DataNodeTestUtils.getFSDataset(dn).getBlockReports(bpid);
}
Project: hadoop    File: MiniDFSCluster.java
/**
 * 
 * @return block reports from all data nodes
 *    the returned list is indexed in the same order as the list of datanodes returned by getDataNodes()
 */
public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
  int numDataNodes = dataNodes.size();
  final List<Map<DatanodeStorage, BlockListAsLongs>> result
      = new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
  for (int i = 0; i < numDataNodes; ++i) {
    result.add(getBlockReport(bpid, i));
  }
  return result;
}
Project: hadoop    File: BlockManagerTestUtil.java
public static DatanodeDescriptor getLocalDatanodeDescriptor(
    boolean initializeStorage) {
  DatanodeDescriptor dn = new DatanodeDescriptor(DFSTestUtil.getLocalDatanodeID());
  if (initializeStorage) {
    dn.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
  }
  return dn;
}
Project: hadoop    File: BlockManagerTestUtil.java
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
    String rackLocation, DatanodeStorage storage, String hostname) {
    DatanodeDescriptor dn = DFSTestUtil.getDatanodeDescriptor(ipAddr,
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation, hostname);
    if (storage != null) {
      dn.updateStorage(storage);
    }
    return dn;
}
Project: hadoop    File: BlockManagerTestUtil.java
public static StorageReport[] getStorageReportsForDatanode(
    DatanodeDescriptor dnd) {
  ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
  for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
    DatanodeStorage dns = new DatanodeStorage(
        storage.getStorageID(), storage.getState(), storage.getStorageType());
    StorageReport report = new StorageReport(
        dns, false, storage.getCapacity(),
        storage.getDfsUsed(), storage.getRemaining(),
        storage.getBlockPoolUsed());
    reports.add(report);
  }
  return reports.toArray(StorageReport.EMPTY_ARRAY);
}