Java code examples for the class org.apache.hadoop.hdfs.protocol.CachePoolInfo

Project: hadoop    File: FSImageSerialization.java
public static CachePoolInfo readCachePoolInfo(DataInput in)
    throws IOException {
  String poolName = readString(in);
  CachePoolInfo info = new CachePoolInfo(poolName);
  int flags = readInt(in);
  if ((flags & 0x1) != 0) {
    info.setOwnerName(readString(in));
  }
  if ((flags & 0x2) != 0) {
    info.setGroupName(readString(in));
  }
  if ((flags & 0x4) != 0) {
    info.setMode(FsPermission.read(in));
  }
  if ((flags & 0x8) != 0) {
    info.setLimit(readLong(in));
  }
  if ((flags & 0x10) != 0) {
    info.setMaxRelativeExpiryMs(readLong(in));
  }
  if ((flags & ~0x1F) != 0) {
    throw new IOException("Unknown flag in CachePoolInfo: " + flags);
  }
  return info;
}
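
For reference, FSImageSerialization also contains the matching writer. Below is a minimal sketch of it under the same flag layout as the reader above (0x1 owner, 0x2 group, 0x4 mode, 0x8 limit, 0x10 max relative expiry); the writeString/writeInt/writeLong helper signatures mirror the readers and are assumptions here, not verbatim upstream code.

public static void writeCachePoolInfo(DataOutputStream out, CachePoolInfo info)
    throws IOException {
  writeString(info.getPoolName(), out);

  final String ownerName = info.getOwnerName();
  final String groupName = info.getGroupName();
  final FsPermission mode = info.getMode();
  final Long limit = info.getLimit();
  final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs();

  // Pack a presence bit for each optional field, matching the reader.
  int flags =
      (ownerName != null ? 0x1 : 0) |
      (groupName != null ? 0x2 : 0) |
      (mode != null ? 0x4 : 0) |
      (limit != null ? 0x8 : 0) |
      (maxRelativeExpiry != null ? 0x10 : 0);
  writeInt(flags, out);

  // Write optional fields in the same order the reader consumes them.
  if (ownerName != null) { writeString(ownerName, out); }
  if (groupName != null) { writeString(groupName, out); }
  if (mode != null) { mode.write(out); }
  if (limit != null) { writeLong(limit, out); }
  if (maxRelativeExpiry != null) { writeLong(maxRelativeExpiry, out); }
}
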
Project: hadoop    File: FSImageSerialization.java
public static CachePoolInfo readCachePoolInfo(Stanza st)
    throws InvalidXmlException {
  String poolName = st.getValue("POOLNAME");
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (st.hasChildren("OWNERNAME")) {
    info.setOwnerName(st.getValue("OWNERNAME"));
  }
  if (st.hasChildren("GROUPNAME")) {
    info.setGroupName(st.getValue("GROUPNAME"));
  }
  if (st.hasChildren("MODE")) {
    info.setMode(FSEditLogOp.fsPermissionFromXml(st));
  }
  if (st.hasChildren("LIMIT")) {
    info.setLimit(Long.parseLong(st.getValue("LIMIT")));
  }
  if (st.hasChildren("MAXRELATIVEEXPIRY")) {
    info.setMaxRelativeExpiryMs(
        Long.parseLong(st.getValue("MAXRELATIVEEXPIRY")));
  }
  return info;
}
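
The edit-log XML path has a corresponding serializer; a hedged sketch follows, assuming the XMLUtils.addSaxString and FSEditLogOp.fsPermissionToXml helpers used elsewhere in the edit-log XML code. Element names match the reader above.

public static void writeCachePoolInfo(ContentHandler contentHandler,
    CachePoolInfo info) throws SAXException {
  XMLUtils.addSaxString(contentHandler, "POOLNAME", info.getPoolName());
  if (info.getOwnerName() != null) {
    XMLUtils.addSaxString(contentHandler, "OWNERNAME", info.getOwnerName());
  }
  if (info.getGroupName() != null) {
    XMLUtils.addSaxString(contentHandler, "GROUPNAME", info.getGroupName());
  }
  if (info.getMode() != null) {
    FSEditLogOp.fsPermissionToXml(contentHandler, info.getMode());
  }
  if (info.getLimit() != null) {
    XMLUtils.addSaxString(contentHandler, "LIMIT",
        Long.toString(info.getLimit()));
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    XMLUtils.addSaxString(contentHandler, "MAXRELATIVEEXPIRY",
        Long.toString(info.getMaxRelativeExpiryMs()));
  }
}
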
Project: hadoop    File: FSNamesystem.java
void addCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  String poolInfoStr = null;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot add cache pool " + req.getPoolName(), safeMode);
    }
    CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req,
        logRetryCache);
    poolInfoStr = info.toString();
    success = true;
  } finally {
    writeUnlock();
    logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
  }

  getEditLog().logSync();
}
Project: hadoop    File: FSNamesystem.java
void modifyCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot modify cache pool " + req.getPoolName(), safeMode);
    }
    FSNDNCacheOp.modifyCachePool(this, cacheManager, req, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    String poolNameStr = "{poolName: " +
        (req == null ? null : req.getPoolName()) + "}";
    logAuditEvent(success, "modifyCachePool", poolNameStr,
                  req == null ? null : req.toString(), null);
  }

  getEditLog().logSync();
}
Project: hadoop    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
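
To make the arithmetic concrete, here is a worked example of the condition above with hypothetical numbers: a pool limited to 100 MB that already needs 60 MB rejects a new 15 MB path at replication 3, because 60 MB + 15 MB × 3 = 105 MB exceeds the limit.

// Hypothetical values plugged into the check above.
long poolLimit = 100L * 1024 * 1024;      // pool.getLimit()
long bytesNeeded = 60L * 1024 * 1024;     // pool.getBytesNeeded()
long pathBytes = 15L * 1024 * 1024;       // stats.getBytesNeeded()
short replication = 3;
boolean wouldExceed =
    bytesNeeded + pathBytes * replication > poolLimit;  // true
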
Project: hadoop    File: CacheManager.java
/**
 * Create a cache pool.
 * 
 * Only the superuser should be able to call this function.
 *
 * @param info    The info for the cache pool to create.
 * @return        Information about the cache pool we created.
 */
public CachePoolInfo addCachePool(CachePoolInfo info)
    throws IOException {
  assert namesystem.hasWriteLock();
  CachePool pool;
  try {
    CachePoolInfo.validate(info);
    String poolName = info.getPoolName();
    pool = cachePools.get(poolName);
    if (pool != null) {
      throw new InvalidRequestException("Cache pool " + poolName
          + " already exists.");
    }
    pool = CachePool.createFromInfoAndDefaults(info);
    cachePools.put(pool.getPoolName(), pool);
  } catch (IOException e) {
    LOG.info("addCachePool of " + info + " failed: ", e);
    throw e;
  }
  LOG.info("addCachePool of {} successful.", info);
  return pool.getInfo(true);
}
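
Clients do not call CacheManager directly; the request travels through DistributedFileSystem and the NameNode RPC layer. A minimal, hedged usage sketch follows (the pool name and limit are illustrative, the caller must be an HDFS superuser, and fs.defaultFS is assumed to point at an HDFS cluster).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class AddCachePoolExample {
  public static void main(String[] args) throws Exception {
    // Assumes the default filesystem is HDFS.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Illustrative: a pool named "reports" capped at 1 GiB.
    dfs.addCachePool(new CachePoolInfo("reports")
        .setLimit(1024L * 1024 * 1024));
  }
}
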
Project: hadoop    File: CachePool.java
/**
 * Create a new cache pool based on a CachePoolInfo object and the defaults.
 * We will fill in information that was not supplied according to the
 * defaults.
 */
static CachePool createFromInfoAndDefaults(CachePoolInfo info)
    throws IOException {
  UserGroupInformation ugi = null;
  String ownerName = info.getOwnerName();
  if (ownerName == null) {
    ugi = NameNode.getRemoteUser();
    ownerName = ugi.getShortUserName();
  }
  String groupName = info.getGroupName();
  if (groupName == null) {
    if (ugi == null) {
      ugi = NameNode.getRemoteUser();
    }
    groupName = ugi.getPrimaryGroupName();
  }
  FsPermission mode = (info.getMode() == null) ? 
      FsPermission.getCachePoolDefault() : info.getMode();
  long limit = info.getLimit() == null ?
      CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
  long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
      CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
      info.getMaxRelativeExpiryMs();
  return new CachePool(info.getPoolName(),
      ownerName, groupName, mode, limit, maxRelativeExpiry);
}
Project: hadoop    File: PBHelper.java
public static CachePoolInfoProto convert(CachePoolInfo info) {
  CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
  builder.setPoolName(info.getPoolName());
  if (info.getOwnerName() != null) {
    builder.setOwnerName(info.getOwnerName());
  }
  if (info.getGroupName() != null) {
    builder.setGroupName(info.getGroupName());
  }
  if (info.getMode() != null) {
    builder.setMode(info.getMode().toShort());
  }
  if (info.getLimit() != null) {
    builder.setLimit(info.getLimit());
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    builder.setMaxRelativeExpiry(info.getMaxRelativeExpiryMs());
  }
  return builder.build();
}
Project: hadoop    File: PBHelper.java
public static CachePoolInfo convert(CachePoolInfoProto proto) {
  // Pool name is a required field, the rest are optional
  String poolName = checkNotNull(proto.getPoolName());
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (proto.hasOwnerName()) {
    info.setOwnerName(proto.getOwnerName());
  }
  if (proto.hasGroupName()) {
    info.setGroupName(proto.getGroupName());
  }
  if (proto.hasMode()) {
    info.setMode(new FsPermission((short)proto.getMode()));
  }
  if (proto.hasLimit()) {
    info.setLimit(proto.getLimit());
  }
  if (proto.hasMaxRelativeExpiry()) {
    info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
  }
  return info;
}
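
The two converters are inverses for every optional field, which a hedged round-trip check makes explicit (the values below are illustrative, and the assertion assumes CachePoolInfo's value-based equals).

CachePoolInfo original = new CachePoolInfo("pool1")
    .setOwnerName("hdfs")
    .setGroupName("hadoop")
    .setMode(new FsPermission((short) 0755))
    .setLimit(4096L)
    .setMaxRelativeExpiryMs(86400000L);  // one day in ms
CachePoolInfo roundTripped = PBHelper.convert(PBHelper.convert(original));
assert original.equals(roundTripped);
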
Project: hadoop    File: TestRetryCacheWithHA.java
/**
 * Add a list of cache pools, list cache pools,
 * switch active NN, and list cache pools again.
 */
@Test (timeout=60000)
public void testListCachePools() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  for (int i=0; i<poolCount; i++) {
    String poolName = "testListCachePools-" + i;
    dfs.addCachePool(new CachePoolInfo(poolName));
    poolNames.add(poolName);
  }
  listCachePools(poolNames, 0);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCachePools(poolNames, 1);
}
Project: hadoop    File: TestRetryCacheWithHA.java
/**
 * Add a list of cache directives, list cache directives,
 * switch active NN, and list cache directives again.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  Path path = new Path("/p");
  for (int i=0; i<poolCount; i++) {
    String poolName = "testListCacheDirectives-" + i;
    CacheDirectiveInfo directiveInfo =
      new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
    dfs.addCachePool(new CachePoolInfo(poolName));
    dfs.addCacheDirective(directiveInfo, EnumSet.of(CacheFlag.FORCE));
    poolNames.add(poolName);
  }
  listCacheDirectives(poolNames, 0);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(poolNames, 1);
}
Project: hadoop    File: TestCacheDirectives.java
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
  // Create a giant file
  final Path fileName = new Path("/exceeds");
  final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
  int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
  DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
      0xFADED);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
      .setPath(fileName).setReplication((short) 1).build());
  waitForCachedBlocks(namenode, -1, numCachedReplicas,
      "testExceeds:1");
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);

  // Try creating a file with giant-sized blocks that exceed cache capacity
  dfs.delete(fileName, false);
  DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
      (short) 1, 0xFADED);
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);
}
Project: aliyun-oss-hadoop-fs    File: PBHelperClient.java
public static CachePoolInfo convert(CachePoolInfoProto proto) {
  // Pool name is a required field, the rest are optional
  String poolName = Preconditions.checkNotNull(proto.getPoolName());
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (proto.hasOwnerName()) {
    info.setOwnerName(proto.getOwnerName());
  }
  if (proto.hasGroupName()) {
    info.setGroupName(proto.getGroupName());
  }
  if (proto.hasMode()) {
    info.setMode(new FsPermission((short)proto.getMode()));
  }
  if (proto.hasLimit()) {
    info.setLimit(proto.getLimit());
  }
  if (proto.hasMaxRelativeExpiry()) {
    info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
  }
  return info;
}
Project: aliyun-oss-hadoop-fs    File: PBHelperClient.java
public static CachePoolInfoProto convert(CachePoolInfo info) {
  CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
  builder.setPoolName(info.getPoolName());
  if (info.getOwnerName() != null) {
    builder.setOwnerName(info.getOwnerName());
  }
  if (info.getGroupName() != null) {
    builder.setGroupName(info.getGroupName());
  }
  if (info.getMode() != null) {
    builder.setMode(info.getMode().toShort());
  }
  if (info.getLimit() != null) {
    builder.setLimit(info.getLimit());
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    builder.setMaxRelativeExpiry(info.getMaxRelativeExpiryMs());
  }
  return builder.build();
}
Project: aliyun-oss-hadoop-fs    File: FSImageSerialization.java
public static CachePoolInfo readCachePoolInfo(DataInput in)
    throws IOException {
  String poolName = readString(in);
  CachePoolInfo info = new CachePoolInfo(poolName);
  int flags = readInt(in);
  if ((flags & 0x1) != 0) {
    info.setOwnerName(readString(in));
  }
  if ((flags & 0x2) != 0) {
    info.setGroupName(readString(in));
  }
  if ((flags & 0x4) != 0) {
    info.setMode(FsPermission.read(in));
  }
  if ((flags & 0x8) != 0) {
    info.setLimit(readLong(in));
  }
  if ((flags & 0x10) != 0) {
    info.setMaxRelativeExpiryMs(readLong(in));
  }
  if ((flags & ~0x1F) != 0) {
    throw new IOException("Unknown flag in CachePoolInfo: " + flags);
  }
  return info;
}
Project: aliyun-oss-hadoop-fs    File: FSImageSerialization.java
public static CachePoolInfo readCachePoolInfo(Stanza st)
    throws InvalidXmlException {
  String poolName = st.getValue("POOLNAME");
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (st.hasChildren("OWNERNAME")) {
    info.setOwnerName(st.getValue("OWNERNAME"));
  }
  if (st.hasChildren("GROUPNAME")) {
    info.setGroupName(st.getValue("GROUPNAME"));
  }
  if (st.hasChildren("MODE")) {
    info.setMode(FSEditLogOp.fsPermissionFromXml(st));
  }
  if (st.hasChildren("LIMIT")) {
    info.setLimit(Long.parseLong(st.getValue("LIMIT")));
  }
  if (st.hasChildren("MAXRELATIVEEXPIRY")) {
    info.setMaxRelativeExpiryMs(
        Long.parseLong(st.getValue("MAXRELATIVEEXPIRY")));
  }
  return info;
}
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
void addCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  writeLock();
  boolean success = false;
  String poolInfoStr = null;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot add cache pool"
        + (req == null ? null : req.getPoolName()));
    CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req,
        logRetryCache);
    poolInfoStr = info.toString();
    success = true;
  } finally {
    writeUnlock();
    logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
  }

  getEditLog().logSync();
}
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
void modifyCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot modify cache pool"
        + (req == null ? null : req.getPoolName()));
    FSNDNCacheOp.modifyCachePool(this, cacheManager, req, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    String poolNameStr = "{poolName: " +
        (req == null ? null : req.getPoolName()) + "}";
    logAuditEvent(success, "modifyCachePool", poolNameStr,
                  req == null ? null : req.toString(), null);
  }

  getEditLog().logSync();
}
Project: aliyun-oss-hadoop-fs    File: NameNodeRpcServer.java
@Override //ClientProtocol
public void addCachePool(CachePoolInfo info) throws IOException {
  checkNNStartup();
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  try {
    namesystem.addCachePool(info, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}
Project: aliyun-oss-hadoop-fs    File: NameNodeRpcServer.java
@Override // ClientProtocol
public void modifyCachePool(CachePoolInfo info) throws IOException {
  checkNNStartup();
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  try {
    namesystem.modifyCachePool(info, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
/**
 * Create a cache pool.
 * 
 * Only the superuser should be able to call this function.
 *
 * @param info    The info for the cache pool to create.
 * @return        Information about the cache pool we created.
 */
public CachePoolInfo addCachePool(CachePoolInfo info)
    throws IOException {
  assert namesystem.hasWriteLock();
  CachePool pool;
  try {
    CachePoolInfo.validate(info);
    String poolName = info.getPoolName();
    pool = cachePools.get(poolName);
    if (pool != null) {
      throw new InvalidRequestException("Cache pool " + poolName
          + " already exists.");
    }
    pool = CachePool.createFromInfoAndDefaults(info);
    cachePools.put(pool.getPoolName(), pool);
  } catch (IOException e) {
    LOG.info("addCachePool of " + info + " failed: ", e);
    throw e;
  }
  LOG.info("addCachePool of {} successful.", info);
  return pool.getInfo(true);
}
Project: aliyun-oss-hadoop-fs    File: CachePool.java
/**
 * Create a new cache pool based on a CachePoolInfo object and the defaults.
 * We will fill in information that was not supplied according to the
 * defaults.
 */
static CachePool createFromInfoAndDefaults(CachePoolInfo info)
    throws IOException {
  UserGroupInformation ugi = null;
  String ownerName = info.getOwnerName();
  if (ownerName == null) {
    ugi = NameNode.getRemoteUser();
    ownerName = ugi.getShortUserName();
  }
  String groupName = info.getGroupName();
  if (groupName == null) {
    if (ugi == null) {
      ugi = NameNode.getRemoteUser();
    }
    groupName = ugi.getPrimaryGroupName();
  }
  FsPermission mode = (info.getMode() == null) ? 
      FsPermission.getCachePoolDefault() : info.getMode();
  long limit = info.getLimit() == null ?
      CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
  long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
      CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
      info.getMaxRelativeExpiryMs();
  return new CachePool(info.getPoolName(),
      ownerName, groupName, mode, limit, maxRelativeExpiry);
}
Project: aliyun-oss-hadoop-fs    File: TestRetryCacheWithHA.java
/**
 * Add a list of cache pools, list cache pools,
 * switch active NN, and list cache pools again.
 */
@Test (timeout=60000)
public void testListCachePools() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  for (int i=0; i<poolCount; i++) {
    String poolName = "testListCachePools-" + i;
    dfs.addCachePool(new CachePoolInfo(poolName));
    poolNames.add(poolName);
  }
  listCachePools(poolNames, 0);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCachePools(poolNames, 1);
}
Project: aliyun-oss-hadoop-fs    File: TestRetryCacheWithHA.java
/**
 * Add a list of cache directives, list cache directives,
 * switch active NN, and list cache directives again.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  Path path = new Path("/p");
  for (int i=0; i<poolCount; i++) {
    String poolName = "testListCacheDirectives-" + i;
    CacheDirectiveInfo directiveInfo =
      new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
    dfs.addCachePool(new CachePoolInfo(poolName));
    dfs.addCacheDirective(directiveInfo, EnumSet.of(CacheFlag.FORCE));
    poolNames.add(poolName);
  }
  listCacheDirectives(poolNames, 0);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(poolNames, 1);
}
Project: aliyun-oss-hadoop-fs    File: TestCacheDirectives.java
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
  // Create a giant file
  final Path fileName = new Path("/exceeds");
  final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
  int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
  DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
      0xFADED);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
      .setPath(fileName).setReplication((short) 1).build());
  waitForCachedBlocks(namenode, -1, numCachedReplicas,
      "testExceeds:1");
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);

  // Try creating a file with giant-sized blocks that exceed cache capacity
  dfs.delete(fileName, false);
  DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
      (short) 1, 0xFADED);
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);
}
Project: aliyun-oss-hadoop-fs    File: TestCacheDirectives.java
@Test(timeout=60000)
public void testNoBackingReplica() throws Exception {
  // Cache all three replicas for a file.
  final Path filename = new Path("/noback");
  final short replication = (short) 3;
  DFSTestUtil.createFile(dfs, filename, 1, replication, 0x0BAC);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(
      new CacheDirectiveInfo.Builder().setPool("pool").setPath(filename)
          .setReplication(replication).build());
  waitForCachedBlocks(namenode, 1, replication, "testNoBackingReplica:1");
  // Pause cache reports while we change the replication factor.
  // This will orphan some cached replicas.
  DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, true);
  try {
    dfs.setReplication(filename, (short) 1);
    DFSTestUtil.waitForReplication(dfs, filename, (short) 1, 30000);
    // The cache locations should drop down to 1 even without cache reports.
    waitForCachedBlocks(namenode, 1, (short) 1, "testNoBackingReplica:2");
  } finally {
    DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
  }
}
Project: big-c    File: FSImageSerialization.java
public static CachePoolInfo readCachePoolInfo(DataInput in)
    throws IOException {
  String poolName = readString(in);
  CachePoolInfo info = new CachePoolInfo(poolName);
  int flags = readInt(in);
  if ((flags & 0x1) != 0) {
    info.setOwnerName(readString(in));
  }
  if ((flags & 0x2) != 0) {
    info.setGroupName(readString(in));
  }
  if ((flags & 0x4) != 0) {
    info.setMode(FsPermission.read(in));
  }
  if ((flags & 0x8) != 0) {
    info.setLimit(readLong(in));
  }
  if ((flags & 0x10) != 0) {
    info.setMaxRelativeExpiryMs(readLong(in));
  }
  if ((flags & ~0x1F) != 0) {
    throw new IOException("Unknown flag in CachePoolInfo: " + flags);
  }
  return info;
}
Project: big-c    File: FSImageSerialization.java
public static CachePoolInfo readCachePoolInfo(Stanza st)
    throws InvalidXmlException {
  String poolName = st.getValue("POOLNAME");
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (st.hasChildren("OWNERNAME")) {
    info.setOwnerName(st.getValue("OWNERNAME"));
  }
  if (st.hasChildren("GROUPNAME")) {
    info.setGroupName(st.getValue("GROUPNAME"));
  }
  if (st.hasChildren("MODE")) {
    info.setMode(FSEditLogOp.fsPermissionFromXml(st));
  }
  if (st.hasChildren("LIMIT")) {
    info.setLimit(Long.parseLong(st.getValue("LIMIT")));
  }
  if (st.hasChildren("MAXRELATIVEEXPIRY")) {
    info.setMaxRelativeExpiryMs(
        Long.parseLong(st.getValue("MAXRELATIVEEXPIRY")));
  }
  return info;
}
Project: big-c    File: FSNamesystem.java
void addCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  String poolInfoStr = null;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot add cache pool " + req.getPoolName(), safeMode);
    }
    CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req,
        logRetryCache);
    poolInfoStr = info.toString();
    success = true;
  } finally {
    writeUnlock();
    logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
  }

  getEditLog().logSync();
}
Project: big-c    File: FSNamesystem.java
void modifyCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot modify cache pool " + req.getPoolName(), safeMode);
    }
    FSNDNCacheOp.modifyCachePool(this, cacheManager, req, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    String poolNameStr = "{poolName: " +
        (req == null ? null : req.getPoolName()) + "}";
    logAuditEvent(success, "modifyCachePool", poolNameStr,
                  req == null ? null : req.toString(), null);
  }

  getEditLog().logSync();
}
Project: big-c    File: CacheManager.java
/**
 * Throws an exception if the CachePool does not have enough capacity to
 * cache the given path at the replication factor.
 *
 * @param pool CachePool where the path is being cached
 * @param path Path that is being cached
 * @param replication Replication factor of the path
 * @throws InvalidRequestException if the pool does not have enough capacity
 */
private void checkLimit(CachePool pool, String path,
    short replication) throws InvalidRequestException {
  CacheDirectiveStats stats = computeNeeded(path, replication);
  if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
    return;
  }
  if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
      .getLimit()) {
    throw new InvalidRequestException("Caching path " + path + " of size "
        + stats.getBytesNeeded() / replication + " bytes at replication "
        + replication + " would exceed pool " + pool.getPoolName()
        + "'s remaining capacity of "
        + (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
  }
}
Project: big-c    File: CacheManager.java
/**
 * Create a cache pool.
 * 
 * Only the superuser should be able to call this function.
 *
 * @param info    The info for the cache pool to create.
 * @return        Information about the cache pool we created.
 */
public CachePoolInfo addCachePool(CachePoolInfo info)
    throws IOException {
  assert namesystem.hasWriteLock();
  CachePool pool;
  try {
    CachePoolInfo.validate(info);
    String poolName = info.getPoolName();
    pool = cachePools.get(poolName);
    if (pool != null) {
      throw new InvalidRequestException("Cache pool " + poolName
          + " already exists.");
    }
    pool = CachePool.createFromInfoAndDefaults(info);
    cachePools.put(pool.getPoolName(), pool);
  } catch (IOException e) {
    LOG.info("addCachePool of " + info + " failed: ", e);
    throw e;
  }
  LOG.info("addCachePool of {} successful.", info);
  return pool.getInfo(true);
}
Project: big-c    File: CachePool.java
/**
 * Create a new cache pool based on a CachePoolInfo object and the defaults.
 * We will fill in information that was not supplied according to the
 * defaults.
 */
static CachePool createFromInfoAndDefaults(CachePoolInfo info)
    throws IOException {
  UserGroupInformation ugi = null;
  String ownerName = info.getOwnerName();
  if (ownerName == null) {
    ugi = NameNode.getRemoteUser();
    ownerName = ugi.getShortUserName();
  }
  String groupName = info.getGroupName();
  if (groupName == null) {
    if (ugi == null) {
      ugi = NameNode.getRemoteUser();
    }
    groupName = ugi.getPrimaryGroupName();
  }
  FsPermission mode = (info.getMode() == null) ? 
      FsPermission.getCachePoolDefault() : info.getMode();
  long limit = info.getLimit() == null ?
      CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
  long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
      CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
      info.getMaxRelativeExpiryMs();
  return new CachePool(info.getPoolName(),
      ownerName, groupName, mode, limit, maxRelativeExpiry);
}
Project: big-c    File: PBHelper.java
public static CachePoolInfoProto convert(CachePoolInfo info) {
  CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
  builder.setPoolName(info.getPoolName());
  if (info.getOwnerName() != null) {
    builder.setOwnerName(info.getOwnerName());
  }
  if (info.getGroupName() != null) {
    builder.setGroupName(info.getGroupName());
  }
  if (info.getMode() != null) {
    builder.setMode(info.getMode().toShort());
  }
  if (info.getLimit() != null) {
    builder.setLimit(info.getLimit());
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    builder.setMaxRelativeExpiry(info.getMaxRelativeExpiryMs());
  }
  return builder.build();
}
Project: big-c    File: PBHelper.java
public static CachePoolInfo convert(CachePoolInfoProto proto) {
  // Pool name is a required field, the rest are optional
  String poolName = checkNotNull(proto.getPoolName());
  CachePoolInfo info = new CachePoolInfo(poolName);
  if (proto.hasOwnerName()) {
    info.setOwnerName(proto.getOwnerName());
  }
  if (proto.hasGroupName()) {
    info.setGroupName(proto.getGroupName());
  }
  if (proto.hasMode()) {
    info.setMode(new FsPermission((short)proto.getMode()));
  }
  if (proto.hasLimit()) {
    info.setLimit(proto.getLimit());
  }
  if (proto.hasMaxRelativeExpiry()) {
    info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
  }
  return info;
}
Project: big-c    File: TestRetryCacheWithHA.java
/**
 * Add a list of cache pools, list cache pools,
 * switch active NN, and list cache pools again.
 */
@Test (timeout=60000)
public void testListCachePools() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  for (int i=0; i<poolCount; i++) {
    String poolName = "testListCachePools-" + i;
    dfs.addCachePool(new CachePoolInfo(poolName));
    poolNames.add(poolName);
  }
  listCachePools(poolNames, 0);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCachePools(poolNames, 1);
}
Project: big-c    File: TestRetryCacheWithHA.java
/**
 * Add a list of cache directives, list cache directives,
 * switch active NN, and list cache directives again.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int poolCount = 7;
  HashSet<String> poolNames = new HashSet<String>(poolCount);
  Path path = new Path("/p");
  for (int i=0; i<poolCount; i++) {
    String poolName = "testListCacheDirectives-" + i;
    CacheDirectiveInfo directiveInfo =
      new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
    dfs.addCachePool(new CachePoolInfo(poolName));
    dfs.addCacheDirective(directiveInfo, EnumSet.of(CacheFlag.FORCE));
    poolNames.add(poolName);
  }
  listCacheDirectives(poolNames, 0);

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(poolNames, 1);
}
Project: big-c    File: TestCacheDirectives.java
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
  // Create a giant file
  final Path fileName = new Path("/exceeds");
  final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
  int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
  DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
      0xFADED);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
      .setPath(fileName).setReplication((short) 1).build());
  waitForCachedBlocks(namenode, -1, numCachedReplicas,
      "testExceeds:1");
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);

  // Try creating a file with giant-sized blocks that exceed cache capacity
  dfs.delete(fileName, false);
  DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
      (short) 1, 0xFADED);
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);
}
Project: hadoop    File: DFSClient.java
public void addCachePool(CachePoolInfo info) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("addCachePool", traceSampler);
  try {
    namenode.addCachePool(info);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
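
After addCachePool returns, the new pool is visible to listing calls. A hedged follow-up sketch using the public DistributedFileSystem API (iteration only; `dfs` is a DistributedFileSystem handle as in the earlier usage sketch):

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
while (pools.hasNext()) {
  CachePoolInfo info = pools.next().getInfo();
  System.out.println(info.getPoolName() + " limit=" + info.getLimit());
}
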