Java class org.apache.hadoop.hdfs.protocol.BlockStoragePolicy usage examples (source code)

Project: hadoop    File: FSDirectory.java
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
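The quota delta above is plain arithmetic: truncating to newLength changes raw storage space by diff * replication, and per-type quota by diff for each storage type the file's policy selects. A minimal standalone sketch of the same accounting, assuming the default policy suite and hypothetical diff/repl values:

import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class TruncateQuotaSketch {
  public static void main(String[] args) {
    final long diff = -64L * 1024 * 1024; // hypothetical: truncation frees 64 MB per replica
    final short repl = 3;                 // hypothetical replication factor
    BlockStoragePolicy policy =
        BlockStoragePolicySuite.createDefaultSuite().getPolicy("HOT");
    System.out.println("storage space delta: " + diff * repl);
    // One "diff" per storage type the policy chooses for this replication.
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        System.out.println(t + " type space delta: " + diff);
      }
    }
  }
}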
Project: hadoop    File: FSNamesystem.java
/** Compute quota change for converting a complete block to a UC block */
private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfoContiguous lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = file.getBlockReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
        .getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
Project: hadoop    File: BlockManager.java
/**
 * Choose target datanodes for creating a new block.
 * 
 * @throws IOException
 *           if the number of targets < minimum replication.
 * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
 *      Set, long, List, BlockStoragePolicy)
 */
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
    final int numOfReplicas, final Node client,
    final Set<Node> excludedNodes,
    final long blocksize,
    final List<String> favoredNodes,
    final byte storagePolicyID) throws IOException {
  List<DatanodeDescriptor> favoredDatanodeDescriptors = 
      getDatanodeDescriptors(favoredNodes);
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
      numOfReplicas, client, excludedNodes, blocksize, 
      favoredDatanodeDescriptors, storagePolicy);
  if (targets.length < minReplication) {
    throw new IOException("File " + src + " could only be replicated to "
        + targets.length + " nodes instead of minReplication (="
        + minReplication + ").  There are "
        + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
        + " datanode(s) running and "
        + (excludedNodes == null? "no": excludedNodes.size())
        + " node(s) are excluded in this operation.");
  }
  return targets;
}
Project: hadoop    File: PBHelper.java
public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
  BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
      .newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
  // creation storage types
  StorageTypesProto creationProto = convert(policy.getStorageTypes());
  Preconditions.checkArgument(creationProto != null);
  builder.setCreationPolicy(creationProto);
  // creation fallback
  StorageTypesProto creationFallbackProto = convert(
      policy.getCreationFallbacks());
  if (creationFallbackProto != null) {
    builder.setCreationFallbackPolicy(creationFallbackProto);
  }
  // replication fallback
  StorageTypesProto replicationFallbackProto = convert(
      policy.getReplicationFallbacks());
  if (replicationFallbackProto != null) {
    builder.setReplicationFallbackPolicy(replicationFallbackProto);
  }
  return builder.build();
}
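A hedged round-trip sketch pairing this method with PBHelper's inverse convert(proto) overload (the one used in convertStoragePolicies at the end of this listing); the proto class comes from HdfsProtos, and the HOT policy is taken from the default suite rather than a live NameNode:

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class PolicyProtoRoundTrip {
  public static void main(String[] args) {
    BlockStoragePolicy hot =
        BlockStoragePolicySuite.createDefaultSuite().getPolicy("HOT");
    BlockStoragePolicyProto proto = PBHelper.convert(hot); // the method above
    BlockStoragePolicy decoded = PBHelper.convert(proto);  // inverse overload
    System.out.println(hot.getId() == decoded.getId()
        && hot.getName().equals(decoded.getName()));       // expected: true
  }
}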
Project: hadoop    File: ClientNamenodeProtocolServerSideTranslatorPB.java
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
    RpcController controller, GetStoragePoliciesRequestProto request)
    throws ServiceException {
  try {
    BlockStoragePolicy[] policies = server.getStoragePolicies();
    GetStoragePoliciesResponseProto.Builder builder =
        GetStoragePoliciesResponseProto.newBuilder();
    if (policies == null) {
      return builder.build();
    }
    for (BlockStoragePolicy policy : policies) {
      builder.addPolicies(PBHelper.convert(policy));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop    File: StoragePolicyAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    System.out.println("Block Storage Policies:");
    for (BlockStoragePolicy policy : policies) {
      if (policy != null) {
        System.out.println("\t" + policy);
      }
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
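For comparison, a minimal client-side sketch that prints the same listing without the admin tool; the hdfs://localhost:9000 endpoint is hypothetical and a running NameNode is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class ListStoragePolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:9000"); // hypothetical endpoint
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf)) {
      System.out.println("Block Storage Policies:");
      for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
        System.out.println("\t" + policy);
      }
    }
  }
}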
Project: hadoop    File: TestStorageMover.java
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for (LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
Project: hadoop    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    // Ignored: the sleep only widens the race window this test exercises.
  }
  return results;
}
Project: hadoop    File: TestStoragePolicySummary.java
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: hadoop    File: TestStoragePolicySummary.java
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: big-c    File: StoragePolicyAdmin.java
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    System.out.println("Block Storage Policies:");
    for (BlockStoragePolicy policy : policies) {
      if (policy != null) {
        System.out.println("\t" + policy);
      }
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
Project: aliyun-oss-hadoop-fs    File: PBHelperClient.java
public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
  BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
      .newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
  // creation storage types
  StorageTypesProto creationProto = convert(policy.getStorageTypes());
  Preconditions.checkArgument(creationProto != null);
  builder.setCreationPolicy(creationProto);
  // creation fallback
  StorageTypesProto creationFallbackProto = convert(
      policy.getCreationFallbacks());
  if (creationFallbackProto != null) {
    builder.setCreationFallbackPolicy(creationFallbackProto);
  }
  // replication fallback
  StorageTypesProto replicationFallbackProto = convert(
      policy.getReplicationFallbacks());
  if (replicationFallbackProto != null) {
    builder.setReplicationFallbackPolicy(replicationFallbackProto);
  }
  return builder.build();
}
Project: big-c    File: TestStoragePolicySummary.java
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: aliyun-oss-hadoop-fs    File: FSDirAppendOp.java
/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
    INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfo lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = lastBlock.getReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = fsn.getFSDirectory()
        .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
@Override
public final ContentSummaryComputationContext computeContentSummary(
    int snapshotId, final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  counts.addContent(Content.FILE, 1);
  final long fileLen = computeFileSize(snapshotId);
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
      .getStorageSpace());

  if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
    String path) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(path);
  fsd.readLock();
  try {
    path = fsd.resolvePath(pc, path, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath(path, false);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ);
    }
    INode inode = iip.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File/Directory does not exist: "
          + iip.getPath());
    }
    return bm.getStoragePolicy(inode.getStoragePolicyID());
  } finally {
    fsd.readUnlock();
  }
}
Project: aliyun-oss-hadoop-fs    File: FileWithSnapshotFeature.java
public void cleanFile(INode.ReclaimContext reclaimContext,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    byte storagePolicyId) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
        .getPolicy(storagePolicyId);
    QuotaCounts old = file.storagespaceConsumed(policy);
    collectBlocksAndClear(reclaimContext, file);
    QuotaCounts current = file.storagespaceConsumed(policy);
    reclaimContext.quotaDelta().add(old.subtract(current));
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
        file);
  }
}
Project: aliyun-oss-hadoop-fs    File: ClientNamenodeProtocolServerSideTranslatorPB.java
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
    RpcController controller, GetStoragePoliciesRequestProto request)
    throws ServiceException {
  try {
    BlockStoragePolicy[] policies = server.getStoragePolicies();
    GetStoragePoliciesResponseProto.Builder builder =
        GetStoragePoliciesResponseProto.newBuilder();
    if (policies == null) {
      return builder.build();
    }
    for (BlockStoragePolicy policy : policies) {
      builder.addPolicies(PBHelperClient.convert(policy));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: big-c    File: TestStoragePolicySummary.java
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: aliyun-oss-hadoop-fs    File: TestStorageMover.java
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for (LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
Project: aliyun-oss-hadoop-fs    File: TestDeleteRace.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    // Ignored: the sleep only widens the race window this test exercises.
  }
  return results;
}
Project: aliyun-oss-hadoop-fs    File: TestStoragePolicySummary.java
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: big-c    File: FSDirectory.java
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
Project: big-c    File: FSNamesystem.java
/** Compute quota change for converting a complete block to a UC block */
private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfoContiguous lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = file.getBlockReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
        .getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
Project: big-c    File: BlockManager.java
/**
 * Choose target datanodes for creating a new block.
 * 
 * @throws IOException
 *           if the number of targets < minimum replication.
 * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
 *      Set, long, List, BlockStoragePolicy)
 */
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
    final int numOfReplicas, final Node client,
    final Set<Node> excludedNodes,
    final long blocksize,
    final List<String> favoredNodes,
    final byte storagePolicyID) throws IOException {
  List<DatanodeDescriptor> favoredDatanodeDescriptors = 
      getDatanodeDescriptors(favoredNodes);
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
      numOfReplicas, client, excludedNodes, blocksize, 
      favoredDatanodeDescriptors, storagePolicy);
  if (targets.length < minReplication) {
    throw new IOException("File " + src + " could only be replicated to "
        + targets.length + " nodes instead of minReplication (="
        + minReplication + ").  There are "
        + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
        + " datanode(s) running and "
        + (excludedNodes == null? "no": excludedNodes.size())
        + " node(s) are excluded in this operation.");
  }
  return targets;
}
Project: big-c    File: ClientNamenodeProtocolServerSideTranslatorPB.java
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
    RpcController controller, GetStoragePoliciesRequestProto request)
    throws ServiceException {
  try {
    BlockStoragePolicy[] policies = server.getStoragePolicies();
    GetStoragePoliciesResponseProto.Builder builder =
        GetStoragePoliciesResponseProto.newBuilder();
    if (policies == null) {
      return builder.build();
    }
    for (BlockStoragePolicy policy : policies) {
      builder.addPolicies(PBHelper.convert(policy));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop    File: DFSClient.java
/**
 * @return All the existing storage policies
 */
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
  TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
  try {
    return namenode.getStoragePolicies();
  } finally {
    scope.close();
  }
}
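DFSClient is the internal RPC wrapper; applications normally reach this call through DistributedFileSystem#getStoragePolicies() (see the StoragePolicyAdmin example above). A direct sketch, again with a hypothetical NameNode URI:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class DirectClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient client =
        new DFSClient(URI.create("hdfs://localhost:9000"), conf); // hypothetical
    try {
      BlockStoragePolicy[] all = client.getStoragePolicies();
      System.out.println(all.length + " storage policies defined");
    } finally {
      client.close();
    }
  }
}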
Project: hadoop    File: INodeFile.java
@Override
public final ContentSummaryComputationContext computeContentSummary(
    final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  long fileLen = 0;
  if (sf == null) {
    fileLen = computeFileSize();
    counts.addContent(Content.FILE, 1);
  } else {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.addContent(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      fileLen = diffs.getLast().getFileSize();
    } else {
      fileLen = computeFileSize();
    }
  }
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed());

  if (getStoragePolicyID() != ID_UNSPECIFIED) {
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
Project: hadoop    File: FSDirAttrOp.java
static HdfsFileStatus setStoragePolicy(
    FSDirectory fsd, BlockManager bm, String src, final String policyName)
    throws IOException {
  if (!fsd.isStoragePolicyEnabled()) {
    throw new IOException(
        "Failed to set storage policy since "
            + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = FSDirectory.resolvePath(src, pathComponents, fsd);
    iip = fsd.getINodesInPath4Write(src);

    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    // get the corresponding policy and make sure the policy name is valid
    BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
    if (policy == null) {
      throw new HadoopIllegalArgumentException(
          "Cannot find a block policy with the name " + policyName);
    }
    unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
    fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
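The client-side counterpart is DistributedFileSystem#setStoragePolicy, which addresses policies by name exactly as validated above. A sketch, where /data/cold is a hypothetical existing directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf)) {
      dfs.setStoragePolicy(new Path("/data/cold"), "COLD"); // hypothetical path
    }
  }
}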
Project: hadoop    File: StoragePolicySummary.java
void add(StorageType[] storageTypes, BlockStoragePolicy policy) {
  StorageTypeAllocation storageCombo = 
      new StorageTypeAllocation(storageTypes, policy);
  Long count = storageComboCounts.get(storageCombo);
  if (count == null) {
    storageComboCounts.put(storageCombo, 1L);
    storageCombo.setActualStoragePolicy(
        getStoragePolicy(storageCombo.getStorageTypes()));
  } else {
    storageComboCounts.put(storageCombo, count.longValue() + 1);
  }
  totalBlocks++;
}
Project: hadoop    File: StoragePolicySummary.java
/**
 * @param storageTypes sorted array of storage types
 * @return the storage policy that matches this specific storage combination
 */
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
  for (BlockStoragePolicy storagePolicy : storagePolicies) {
    StorageType[] policyStorageTypes = storagePolicy.getStorageTypes();
    policyStorageTypes = Arrays.copyOf(policyStorageTypes, policyStorageTypes.length);
    Arrays.sort(policyStorageTypes);
    if (policyStorageTypes.length <= storageTypes.length) {
      // The policy's sorted storage types must form a prefix of the observed
      // (sorted) combination...
      int i = 0;
      for (; i < policyStorageTypes.length; i++) {
        if (policyStorageTypes[i] != storageTypes[i]) {
          break;
        }
      }
      if (i < policyStorageTypes.length) {
        continue; // prefix mismatch, try the next policy
      }
      // ...and every surplus entry must repeat the policy's last
      // (highest-sorted) storage type.
      int j = policyStorageTypes.length;
      for (; j < storageTypes.length; j++) {
        if (policyStorageTypes[i - 1] != storageTypes[j]) {
          break;
        }
      }
      if (j == storageTypes.length) {
        return storagePolicy;
      }
    }
  }
  return null;
}
Project: hadoop    File: FSNamesystem.java
/**
 * @return All the existing block storage policies
 */
BlockStoragePolicy[] getStoragePolicies() throws IOException {
  checkOperation(OperationCategory.READ);
  waitForLoadingFSImage();
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    return FSDirAttrOp.getStoragePolicies(blockManager);
  } finally {
    readUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
private void setNewINodeStoragePolicy(INodeFile inode,
                                      INodesInPath iip,
                                      boolean isLazyPersist)
    throws IOException {

  if (isLazyPersist) {
    BlockStoragePolicy lpPolicy =
        blockManager.getStoragePolicy("LAZY_PERSIST");

    // Set LAZY_PERSIST storage policy if the flag was passed to
    // CreateFile.
    if (lpPolicy == null) {
      throw new HadoopIllegalArgumentException(
          "The LAZY_PERSIST storage policy has been disabled " +
          "by the administrator.");
    }
    inode.setStoragePolicyID(lpPolicy.getId(),
                               iip.getLatestSnapshotId());
  } else {
    BlockStoragePolicy effectivePolicy =
        blockManager.getStoragePolicy(inode.getStoragePolicyID());

    if (effectivePolicy != null &&
        effectivePolicy.isCopyOnCreateFile()) {
      // Copy effective policy from ancestor directory to current file.
      inode.setStoragePolicyID(effectivePolicy.getId(),
                               iip.getLatestSnapshotId());
    }
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Periodically go over the list of lazyPersist files with missing
 * blocks and unlink them from the namespace.
 */
private void clearCorruptLazyPersistFiles()
    throws IOException {

  BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");

  List<BlockCollection> filesToDelete = new ArrayList<>();
  boolean changed = false;
  writeLock();
  try {
    final Iterator<Block> it = blockManager.getCorruptReplicaBlockIterator();

    while (it.hasNext()) {
      Block b = it.next();
      BlockInfoContiguous blockInfo = blockManager.getStoredBlock(b);
      if (blockInfo.getBlockCollection().getStoragePolicyID()
          == lpPolicy.getId()) {
        filesToDelete.add(blockInfo.getBlockCollection());
      }
    }

    for (BlockCollection bc : filesToDelete) {
      LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
      BlocksMapUpdateInfo toRemoveBlocks =
          FSDirDeleteOp.deleteInternal(
              FSNamesystem.this, bc.getName(),
              INodesInPath.fromINode((INodeFile) bc), false);
      changed |= toRemoveBlocks != null;
      if (toRemoveBlocks != null) {
        removeBlocks(toRemoveBlocks); // Incremental deletion of blocks
      }
    }
  } finally {
    writeUnlock();
  }
  if (changed) {
    getEditLog().logSync();
  }
}
Project: hadoop    File: BlockManager.java
/** Choose target for getting additional datanodes for an existing pipeline. */
public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src,
    int numAdditionalNodes,
    Node clientnode,
    List<DatanodeStorageInfo> chosen,
    Set<Node> excludes,
    long blocksize,
    byte storagePolicyID) {

  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
      chosen, true, excludes, blocksize, storagePolicy);
}
Project: hadoop    File: BlockPlacementPolicyDefault.java
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
      excludedNodes, blocksize, storagePolicy);
}
Project: hadoop    File: BlockPlacementPolicy.java
/**
 * Same as {@link #chooseTarget(String, int, Node, Set, long, List, BlockStoragePolicy)}
 * with added parameter {@code favoredDatanodes}
 * @param favoredNodes datanodes that should be favored as targets. This
 *          is only a hint and due to cluster state, namenode may not be 
 *          able to place the blocks on these datanodes.
 */
DatanodeStorageInfo[] chooseTarget(String src,
    int numOfReplicas, Node writer,
    Set<Node> excludedNodes,
    long blocksize,
    List<DatanodeDescriptor> favoredNodes,
    BlockStoragePolicy storagePolicy) {
  // This base class does not provide the functionality of placing a block
  // on favored datanodes; implementations of this class are expected to
  // provide that functionality.

  return chooseTarget(src, numOfReplicas, writer, 
      new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
      excludedNodes, blocksize, storagePolicy);
}
Project: hadoop    File: BlockStoragePolicySuite.java
public BlockStoragePolicy getPolicy(String policyName) {
  Preconditions.checkNotNull(policyName);

  if (policies != null) {
    for (BlockStoragePolicy policy : policies) {
      if (policy != null && policy.getName().equalsIgnoreCase(policyName)) {
        return policy;
      }
    }
  }
  return null;
}
Project: hadoop    File: BlockStoragePolicySuite.java
public BlockStoragePolicy[] getAllPolicies() {
  List<BlockStoragePolicy> list = Lists.newArrayList();
  if (policies != null) {
    for (BlockStoragePolicy policy : policies) {
      if (policy != null) {
        list.add(policy);
      }
    }
  }
  return list.toArray(new BlockStoragePolicy[list.size()]);
}
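Both lookups can be exercised without a NameNode via the default suite; note that getPolicy matches names case-insensitively. A sketch:

import java.util.Arrays;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class SuiteSketch {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy cold = suite.getPolicy("cold"); // same as "COLD"
    System.out.println("cold id = " + cold.getId());
    for (BlockStoragePolicy p : suite.getAllPolicies()) {
      System.out.println(p.getId() + "\t" + p.getName() + "\t"
          + Arrays.toString(p.getStorageTypes()));
    }
  }
}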
Project: hadoop    File: PBHelper.java
public static BlockStoragePolicy[] convertStoragePolicies(
    List<BlockStoragePolicyProto> policyProtos) {
  if (policyProtos == null || policyProtos.size() == 0) {
    return new BlockStoragePolicy[0];
  }
  BlockStoragePolicy[] policies = new BlockStoragePolicy[policyProtos.size()];
  int i = 0;
  for (BlockStoragePolicyProto proto : policyProtos) {
    policies[i++] = convert(proto);
  }
  return policies;
}
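Finally, a sketch of how a client-side list is decoded after a getStoragePolicies RPC: encode the default suite's policies, then feed them back through convertStoragePolicies; imports mirror the round-trip sketch earlier in this listing:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class PolicyListRoundTrip {
  public static void main(String[] args) {
    List<BlockStoragePolicyProto> protos = new ArrayList<>();
    for (BlockStoragePolicy p :
        BlockStoragePolicySuite.createDefaultSuite().getAllPolicies()) {
      protos.add(PBHelper.convert(p));
    }
    BlockStoragePolicy[] decoded = PBHelper.convertStoragePolicies(protos);
    System.out.println(decoded.length + " policies decoded");
  }
}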