Java class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration — example source code

项目:hadoop    文件:CachePoolInfo.java   
/**
 * Validates the fields of a {@link CachePoolInfo}.
 *
 * @param info the pool info to validate.
 * @throws InvalidRequestException if info is null, the limit is negative,
 *         the max relative expiry is negative or exceeds
 *         {@code Expiration.MAX_RELATIVE_EXPIRY_MS}, or the pool name is
 *         invalid.
 */
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
    if (maxRelativeExpiryMs < 0L) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
项目:aliyun-oss-hadoop-fs    文件:CachePoolInfo.java   
/**
 * Validates the fields of a {@link CachePoolInfo}.
 *
 * @param info the pool info to validate.
 * @throws InvalidRequestException if info is null, the limit is negative,
 *         the max relative expiry is negative or exceeds
 *         {@code Expiration.MAX_RELATIVE_EXPIRY_MS}, or the pool name is
 *         invalid.
 */
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
    if (maxRelativeExpiryMs < 0L) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
项目:big-c    文件:CachePoolInfo.java   
/**
 * Validates the fields of a {@link CachePoolInfo}.
 *
 * @param info the pool info to validate.
 * @throws InvalidRequestException if info is null, the limit is negative,
 *         the max relative expiry is negative or exceeds
 *         {@code Expiration.MAX_RELATIVE_EXPIRY_MS}, or the pool name is
 *         invalid.
 */
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
    if (maxRelativeExpiryMs < 0L) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:CachePoolInfo.java   
/**
 * Validates the fields of a {@link CachePoolInfo}.
 *
 * @param info the pool info to validate.
 * @throws InvalidRequestException if info is null, the limit is negative,
 *         the max relative expiry is negative or exceeds
 *         {@code Expiration.MAX_RELATIVE_EXPIRY_MS}, or the pool name is
 *         invalid.
 */
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
    if (maxRelativeExpiryMs < 0L) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
项目:FlexMap    文件:CachePoolInfo.java   
/**
 * Validates the fields of a {@link CachePoolInfo}.
 *
 * @param info the pool info to validate.
 * @throws InvalidRequestException if info is null, the limit is negative,
 *         the max relative expiry is negative or exceeds
 *         {@code Expiration.MAX_RELATIVE_EXPIRY_MS}, or the pool name is
 *         invalid.
 */
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
    if (maxRelativeExpiryMs < 0L) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
项目:hadoop-on-lustre2    文件:CachePoolInfo.java   
/**
 * Validates the fields of a {@link CachePoolInfo}.
 *
 * @param info the pool info to validate.
 * @throws InvalidRequestException if info is null, the limit is negative,
 *         the max relative expiry is negative or exceeds
 *         {@code Expiration.MAX_RELATIVE_EXPIRY_MS}, or the pool name is
 *         invalid.
 */
public static void validate(CachePoolInfo info) throws IOException {
  if (info == null) {
    throw new InvalidRequestException("CachePoolInfo is null");
  }
  if ((info.getLimit() != null) && (info.getLimit() < 0)) {
    throw new InvalidRequestException("Limit is negative.");
  }
  if (info.getMaxRelativeExpiryMs() != null) {
    long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
    // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
    if (maxRelativeExpiryMs < 0L) {
      throw new InvalidRequestException("Max relative expiry is negative.");
    }
    if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
      throw new InvalidRequestException("Max relative expiry is too big.");
    }
  }
  validateName(info.poolName);
}
项目:hadoop    文件:CacheManager.java   
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 *
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
      maxRelativeExpiryTime);
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    // No expiration requested: default to the pool's maximum.
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
  if (expiry.getMillis() < 0L) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  // Normalize to both forms so the caps below can be checked relatively.
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
项目:hadoop    文件:CacheAdmin.java   
/**
 * Parses a TTL string into an Expiration: "never" (case-insensitive) maps to
 * {@link CacheDirectiveInfo.Expiration#NEVER}; anything else is parsed as a
 * relative time span.
 *
 * @param ttlString TTL to parse, or null if no TTL was specified.
 * @return the corresponding Expiration, or null when ttlString is null.
 * @throws IOException if the TTL cannot be parsed as a relative time.
 */
private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)
    throws IOException {
  if (ttlString == null) {
    return null;
  }
  if ("never".equalsIgnoreCase(ttlString)) {
    return CacheDirectiveInfo.Expiration.NEVER;
  }
  return CacheDirectiveInfo.Expiration.newRelative(
      DFSUtil.parseRelativeTime(ttlString));
}
项目:aliyun-oss-hadoop-fs    文件:CacheManager.java   
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 *
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
      maxRelativeExpiryTime);
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    // No expiration requested: default to the pool's maximum.
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
  if (expiry.getMillis() < 0L) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  // Normalize to both forms so the caps below can be checked relatively.
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
项目:aliyun-oss-hadoop-fs    文件:CacheAdmin.java   
/**
 * Parses a TTL string into an Expiration: "never" (case-insensitive) maps to
 * {@link CacheDirectiveInfo.Expiration#NEVER}; anything else is parsed as a
 * relative time span.
 *
 * @param ttlString TTL to parse, or null if no TTL was specified.
 * @return the corresponding Expiration, or null when ttlString is null.
 * @throws IOException if the TTL cannot be parsed as a relative time.
 */
private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)
    throws IOException {
  if (ttlString == null) {
    return null;
  }
  if ("never".equalsIgnoreCase(ttlString)) {
    return CacheDirectiveInfo.Expiration.NEVER;
  }
  return CacheDirectiveInfo.Expiration.newRelative(
      DFSUtil.parseRelativeTime(ttlString));
}
项目:big-c    文件:CacheManager.java   
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 *
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
      maxRelativeExpiryTime);
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    // No expiration requested: default to the pool's maximum.
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
  if (expiry.getMillis() < 0L) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  // Normalize to both forms so the caps below can be checked relatively.
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
项目:big-c    文件:CacheAdmin.java   
/**
 * Parses a TTL string into an Expiration: "never" (case-insensitive) maps to
 * {@link CacheDirectiveInfo.Expiration#NEVER}; anything else is parsed as a
 * relative time span.
 *
 * @param ttlString TTL to parse, or null if no TTL was specified.
 * @return the corresponding Expiration, or null when ttlString is null.
 * @throws IOException if the TTL cannot be parsed as a relative time.
 */
private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)
    throws IOException {
  if (ttlString == null) {
    return null;
  }
  if ("never".equalsIgnoreCase(ttlString)) {
    return CacheDirectiveInfo.Expiration.NEVER;
  }
  return CacheDirectiveInfo.Expiration.newRelative(
      DFSUtil.parseRelativeTime(ttlString));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:CacheManager.java   
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 *
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
      maxRelativeExpiryTime);
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    // No expiration requested: default to the pool's maximum.
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
  if (expiry.getMillis() < 0L) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  // Normalize to both forms so the caps below can be checked relatively.
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:CacheAdmin.java   
/**
 * Parses a TTL string into an Expiration: "never" (case-insensitive) maps to
 * {@link CacheDirectiveInfo.Expiration#NEVER}; anything else is parsed as a
 * relative time span.
 *
 * @param ttlString TTL to parse, or null if no TTL was specified.
 * @return the corresponding Expiration, or null when ttlString is null.
 * @throws IOException if the TTL cannot be parsed as a relative time.
 */
private static Expiration parseExpirationString(String ttlString)
    throws IOException {
  if (ttlString == null) {
    return null;
  }
  if ("never".equalsIgnoreCase(ttlString)) {
    return CacheDirectiveInfo.Expiration.NEVER;
  }
  return CacheDirectiveInfo.Expiration.newRelative(
      DFSUtil.parseRelativeTime(ttlString));
}
项目:FlexMap    文件:CacheManager.java   
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 *
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
      maxRelativeExpiryTime);
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    // No expiration requested: default to the pool's maximum.
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
  if (expiry.getMillis() < 0L) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  // Normalize to both forms so the caps below can be checked relatively.
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
项目:FlexMap    文件:CacheAdmin.java   
/**
 * Parses a TTL string into an Expiration: "never" (case-insensitive) maps to
 * {@link CacheDirectiveInfo.Expiration#NEVER}; anything else is parsed as a
 * relative time span.
 *
 * @param ttlString TTL to parse, or null if no TTL was specified.
 * @return the corresponding Expiration, or null when ttlString is null.
 * @throws IOException if the TTL cannot be parsed as a relative time.
 */
private static Expiration parseExpirationString(String ttlString)
    throws IOException {
  if (ttlString == null) {
    return null;
  }
  if ("never".equalsIgnoreCase(ttlString)) {
    return CacheDirectiveInfo.Expiration.NEVER;
  }
  return CacheDirectiveInfo.Expiration.newRelative(
      DFSUtil.parseRelativeTime(ttlString));
}
项目:hadoop-on-lustre2    文件:CacheManager.java   
/**
 * Calculates the absolute expiry time of the directive from the
 * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
 * into an absolute time based on the local clock.
 *
 * @param info to validate.
 * @param maxRelativeExpiryTime of the info's pool.
 * @return the expiration time, or the pool's max absolute expiration if the
 *         info's expiration was not set.
 * @throws InvalidRequestException if the info's Expiration is invalid.
 */
private static long validateExpiryTime(CacheDirectiveInfo info,
    long maxRelativeExpiryTime) throws InvalidRequestException {
  // Guard the trace log: this logger's API builds the message eagerly.
  if (LOG.isTraceEnabled()) {
    LOG.trace("Validating directive " + info
        + " pool maxRelativeExpiryTime " + maxRelativeExpiryTime);
  }
  final long now = new Date().getTime();
  final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
  if (info == null || info.getExpiration() == null) {
    // No expiration requested: default to the pool's maximum.
    return maxAbsoluteExpiryTime;
  }
  Expiration expiry = info.getExpiration();
  // Use 0L: a lowercase 'l' suffix is easily misread as the digit '1'.
  if (expiry.getMillis() < 0L) {
    throw new InvalidRequestException("Cannot set a negative expiration: "
        + expiry.getMillis());
  }
  // Normalize to both forms so the caps below can be checked relatively.
  long relExpiryTime, absExpiryTime;
  if (expiry.isRelative()) {
    relExpiryTime = expiry.getMillis();
    absExpiryTime = now + relExpiryTime;
  } else {
    absExpiryTime = expiry.getMillis();
    relExpiryTime = absExpiryTime - now;
  }
  // Need to cap the expiry so we don't overflow a long when doing math
  if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
    throw new InvalidRequestException("Expiration "
        + expiry.toString() + " is too far in the future!");
  }
  // Fail if the requested expiry is greater than the max
  if (relExpiryTime > maxRelativeExpiryTime) {
    throw new InvalidRequestException("Expiration " + expiry.toString()
        + " exceeds the max relative expiration time of "
        + maxRelativeExpiryTime + " ms.");
  }
  return absExpiryTime;
}
项目:hadoop-on-lustre2    文件:CacheAdmin.java   
/**
 * Parses a TTL string into an Expiration: "never" (case-insensitive) maps to
 * {@link CacheDirectiveInfo.Expiration#NEVER}; anything else is parsed as a
 * relative time span.
 *
 * @param ttlString TTL to parse, or null if no TTL was specified.
 * @return the corresponding Expiration, or null when ttlString is null.
 * @throws IOException if the TTL cannot be parsed as a relative time.
 */
private static Expiration parseExpirationString(String ttlString)
    throws IOException {
  if (ttlString == null) {
    return null;
  }
  if ("never".equalsIgnoreCase(ttlString)) {
    return CacheDirectiveInfo.Expiration.NEVER;
  }
  return CacheDirectiveInfo.Expiration.newRelative(
      DFSUtil.parseRelativeTime(ttlString));
}
项目:hadoop    文件:CacheAdmin.java   
/**
 * Lists cache directives, optionally filtered by path, pool, or id, and
 * optionally including per-directive statistics (-stats).
 *
 * @param conf configuration used to contact the NameNode.
 * @param args remaining CLI arguments; recognized options are consumed.
 * @return 0 on success, 1 on bad arguments, 2 on an RPC/listing failure.
 * @throws IOException on unexpected I/O failure.
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  CacheDirectiveInfo.Builder builder =
      new CacheDirectiveInfo.Builder();
  // Optional filters: -path, -pool, -id; -stats widens the output table.
  String pathFilter = StringUtils.popOptionWithArgument("-path", args);
  if (pathFilter != null) {
    builder.setPath(new Path(pathFilter));
  }
  String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
  if (poolFilter != null) {
    builder.setPool(poolFilter);
  }
  boolean printStats = StringUtils.popOption("-stats", args);
  String idFilter = StringUtils.popOptionWithArgument("-id", args);
  if (idFilter != null) {
    builder.setId(Long.parseLong(idFilter));
  }
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("ID", Justification.RIGHT).
      addField("POOL", Justification.LEFT).
      addField("REPL", Justification.RIGHT).
      addField("EXPIRY", Justification.LEFT).
      addField("PATH", Justification.LEFT);
  if (printStats) {
    tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
                addField("BYTES_CACHED", Justification.RIGHT).
                addField("FILES_NEEDED", Justification.RIGHT).
                addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing tableListing = tableBuilder.build();
  try {
    DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(builder.build());
    int numEntries = 0;
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo directive = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      // Diamond operator: element type is inferred from the declaration.
      List<String> row = new LinkedList<>();
      row.add("" + directive.getId());
      row.add(directive.getPool());
      row.add("" + directive.getReplication());
      String expiry;
      // This is effectively never, round for nice printing
      if (directive.getExpiration().getMillis() >
          Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiry = "never";
      } else {
        expiry = directive.getExpiration().toString();
      }
      row.add(expiry);
      row.add(directive.getPath().toUri().getPath());
      if (printStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      // Zero-length array argument: the runtime sizes the result itself.
      tableListing.addRow(row.toArray(new String[0]));
      numEntries++;
    }
    System.out.print(String.format("Found %d entr%s%n",
        numEntries, numEntries == 1 ? "y" : "ies"));
    if (numEntries > 0) {
      System.out.print(tableListing);
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
项目:aliyun-oss-hadoop-fs    文件:CacheAdmin.java   
/**
 * Lists cache directives, optionally filtered by path, pool, or id, and
 * optionally including per-directive statistics (-stats).
 *
 * @param conf configuration used to contact the NameNode.
 * @param args remaining CLI arguments; recognized options are consumed.
 * @return 0 on success, 1 on bad arguments, 2 on an RPC/listing failure.
 * @throws IOException on unexpected I/O failure.
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  CacheDirectiveInfo.Builder builder =
      new CacheDirectiveInfo.Builder();
  // Optional filters: -path, -pool, -id; -stats widens the output table.
  String pathFilter = StringUtils.popOptionWithArgument("-path", args);
  if (pathFilter != null) {
    builder.setPath(new Path(pathFilter));
  }
  String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
  if (poolFilter != null) {
    builder.setPool(poolFilter);
  }
  boolean printStats = StringUtils.popOption("-stats", args);
  String idFilter = StringUtils.popOptionWithArgument("-id", args);
  if (idFilter != null) {
    builder.setId(Long.parseLong(idFilter));
  }
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("ID", Justification.RIGHT).
      addField("POOL", Justification.LEFT).
      addField("REPL", Justification.RIGHT).
      addField("EXPIRY", Justification.LEFT).
      addField("PATH", Justification.LEFT);
  if (printStats) {
    tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
                addField("BYTES_CACHED", Justification.RIGHT).
                addField("FILES_NEEDED", Justification.RIGHT).
                addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing tableListing = tableBuilder.build();
  try {
    DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(builder.build());
    int numEntries = 0;
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo directive = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      // Diamond operator: element type is inferred from the declaration.
      List<String> row = new LinkedList<>();
      row.add("" + directive.getId());
      row.add(directive.getPool());
      row.add("" + directive.getReplication());
      String expiry;
      // This is effectively never, round for nice printing
      if (directive.getExpiration().getMillis() >
          Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiry = "never";
      } else {
        expiry = directive.getExpiration().toString();
      }
      row.add(expiry);
      row.add(directive.getPath().toUri().getPath());
      if (printStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      // Zero-length array argument: the runtime sizes the result itself.
      tableListing.addRow(row.toArray(new String[0]));
      numEntries++;
    }
    System.out.print(String.format("Found %d entr%s%n",
        numEntries, numEntries == 1 ? "y" : "ies"));
    if (numEntries > 0) {
      System.out.print(tableListing);
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
项目:aliyun-oss-hadoop-fs    文件:TestCacheDirectives.java   
/**
 * Verifies that cache-directive expirations take effect: blocks are cached
 * before expiry, uncached once a directive expires, re-cached when the
 * expiry is extended, and that a negative TTL is rejected.
 */
@Test(timeout=120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short)2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",
      entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",
      entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(-1)).build());
    // Without this fail(), the test would silently pass if the server
    // accepted the negative TTL instead of throwing.
    fail("Expected InvalidRequestException for a negative expiration");
  } catch (InvalidRequestException e) {
    GenericTestUtils
        .assertExceptionContains("Cannot set a negative expiration", e);
  }
}
项目:big-c    文件:CacheAdmin.java   
/**
 * Lists cache directives, optionally filtered by path, pool, or id, and
 * optionally including per-directive statistics (-stats).
 *
 * @param conf configuration used to contact the NameNode.
 * @param args remaining CLI arguments; recognized options are consumed.
 * @return 0 on success, 1 on bad arguments, 2 on an RPC/listing failure.
 * @throws IOException on unexpected I/O failure.
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  CacheDirectiveInfo.Builder builder =
      new CacheDirectiveInfo.Builder();
  // Optional filters: -path, -pool, -id; -stats widens the output table.
  String pathFilter = StringUtils.popOptionWithArgument("-path", args);
  if (pathFilter != null) {
    builder.setPath(new Path(pathFilter));
  }
  String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
  if (poolFilter != null) {
    builder.setPool(poolFilter);
  }
  boolean printStats = StringUtils.popOption("-stats", args);
  String idFilter = StringUtils.popOptionWithArgument("-id", args);
  if (idFilter != null) {
    builder.setId(Long.parseLong(idFilter));
  }
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("ID", Justification.RIGHT).
      addField("POOL", Justification.LEFT).
      addField("REPL", Justification.RIGHT).
      addField("EXPIRY", Justification.LEFT).
      addField("PATH", Justification.LEFT);
  if (printStats) {
    tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
                addField("BYTES_CACHED", Justification.RIGHT).
                addField("FILES_NEEDED", Justification.RIGHT).
                addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing tableListing = tableBuilder.build();
  try {
    DistributedFileSystem dfs = AdminHelper.getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(builder.build());
    int numEntries = 0;
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo directive = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      // Diamond operator: element type is inferred from the declaration.
      List<String> row = new LinkedList<>();
      row.add("" + directive.getId());
      row.add(directive.getPool());
      row.add("" + directive.getReplication());
      String expiry;
      // This is effectively never, round for nice printing
      if (directive.getExpiration().getMillis() >
          Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiry = "never";
      } else {
        expiry = directive.getExpiration().toString();
      }
      row.add(expiry);
      row.add(directive.getPath().toUri().getPath());
      if (printStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      // Zero-length array argument: the runtime sizes the result itself.
      tableListing.addRow(row.toArray(new String[0]));
      numEntries++;
    }
    System.out.print(String.format("Found %d entr%s%n",
        numEntries, numEntries == 1 ? "y" : "ies"));
    if (numEntries > 0) {
      System.out.print(tableListing);
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
项目:big-c    文件:TestCacheDirectives.java   
/**
 * Verifies that cache-directive expirations take effect: blocks are cached
 * before expiry, uncached once a directive expires, re-cached when the
 * expiry is extended, and that a negative TTL is rejected.
 */
@Test(timeout=120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short)2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",
      entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",
      entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(-1)).build());
    // Without this fail(), the test would silently pass if the server
    // accepted the negative TTL instead of throwing.
    fail("Expected InvalidRequestException for a negative expiration");
  } catch (InvalidRequestException e) {
    GenericTestUtils
        .assertExceptionContains("Cannot set a negative expiration", e);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:CacheAdmin.java   
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  // Translate the optional CLI flags into a directive filter.
  CacheDirectiveInfo.Builder filterBuilder = new CacheDirectiveInfo.Builder();
  String pathArg = StringUtils.popOptionWithArgument("-path", args);
  if (pathArg != null) {
    filterBuilder.setPath(new Path(pathArg));
  }
  String poolArg = StringUtils.popOptionWithArgument("-pool", args);
  if (poolArg != null) {
    filterBuilder.setPool(poolArg);
  }
  boolean showStats = StringUtils.popOption("-stats", args);
  String idArg = StringUtils.popOptionWithArgument("-id", args);
  if (idArg != null) {
    filterBuilder.setId(Long.parseLong(idArg));
  }
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  // Lay out the listing table; the stats columns are optional.
  TableListing.Builder listingBuilder = new TableListing.Builder()
      .addField("ID", Justification.RIGHT)
      .addField("POOL", Justification.LEFT)
      .addField("REPL", Justification.RIGHT)
      .addField("EXPIRY", Justification.LEFT)
      .addField("PATH", Justification.LEFT);
  if (showStats) {
    listingBuilder.addField("BYTES_NEEDED", Justification.RIGHT)
        .addField("BYTES_CACHED", Justification.RIGHT)
        .addField("FILES_NEEDED", Justification.RIGHT)
        .addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing listing = listingBuilder.build();
  try {
    DistributedFileSystem fs = getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> entries =
        fs.listCacheDirectives(filterBuilder.build());
    int count = 0;
    while (entries.hasNext()) {
      CacheDirectiveEntry entry = entries.next();
      CacheDirectiveInfo info = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      List<String> row = new LinkedList<String>();
      row.add("" + info.getId());
      row.add(info.getPool());
      row.add("" + info.getReplication());
      String expiryText;
      // Anything beyond half the max relative expiry is effectively
      // permanent; render it as "never" for nicer output.
      if (info.getExpiration().getMillis()
          > Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiryText = "never";
      } else {
        expiryText = info.getExpiration().toString();
      }
      row.add(expiryText);
      row.add(info.getPath().toUri().getPath());
      if (showStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      listing.addRow(row.toArray(new String[0]));
      count++;
    }
    System.out.printf("Found %d entr%s%n", count, count == 1 ? "y" : "ies");
    if (count > 0) {
      System.out.print(listing);
    }
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }
  return 0;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestCacheDirectives.java   
/**
 * Verifies cache-directive expiration end to end: a future absolute
 * expiry caches blocks, a relative expiry of 0 uncaches them, and a
 * negative TTL is rejected by the NameNode.
 */
@Test(timeout=120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short)2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",
      entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",
      entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(-1)).build());
    // Fix: without failing here, the test silently passes when no
    // exception is thrown, so the rejection was never actually verified.
    fail("Expected InvalidRequestException for a negative expiration");
  } catch (InvalidRequestException e) {
    GenericTestUtils
        .assertExceptionContains("Cannot set a negative expiration", e);
  }
}
项目:FlexMap    文件:CacheAdmin.java   
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  // Translate the optional CLI flags into a directive filter.
  CacheDirectiveInfo.Builder filterBuilder = new CacheDirectiveInfo.Builder();
  String pathArg = StringUtils.popOptionWithArgument("-path", args);
  if (pathArg != null) {
    filterBuilder.setPath(new Path(pathArg));
  }
  String poolArg = StringUtils.popOptionWithArgument("-pool", args);
  if (poolArg != null) {
    filterBuilder.setPool(poolArg);
  }
  boolean showStats = StringUtils.popOption("-stats", args);
  String idArg = StringUtils.popOptionWithArgument("-id", args);
  if (idArg != null) {
    filterBuilder.setId(Long.parseLong(idArg));
  }
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  // Lay out the listing table; the stats columns are optional.
  TableListing.Builder listingBuilder = new TableListing.Builder()
      .addField("ID", Justification.RIGHT)
      .addField("POOL", Justification.LEFT)
      .addField("REPL", Justification.RIGHT)
      .addField("EXPIRY", Justification.LEFT)
      .addField("PATH", Justification.LEFT);
  if (showStats) {
    listingBuilder.addField("BYTES_NEEDED", Justification.RIGHT)
        .addField("BYTES_CACHED", Justification.RIGHT)
        .addField("FILES_NEEDED", Justification.RIGHT)
        .addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing listing = listingBuilder.build();
  try {
    DistributedFileSystem fs = getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> entries =
        fs.listCacheDirectives(filterBuilder.build());
    int count = 0;
    while (entries.hasNext()) {
      CacheDirectiveEntry entry = entries.next();
      CacheDirectiveInfo info = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      List<String> row = new LinkedList<String>();
      row.add("" + info.getId());
      row.add(info.getPool());
      row.add("" + info.getReplication());
      String expiryText;
      // Anything beyond half the max relative expiry is effectively
      // permanent; render it as "never" for nicer output.
      if (info.getExpiration().getMillis()
          > Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiryText = "never";
      } else {
        expiryText = info.getExpiration().toString();
      }
      row.add(expiryText);
      row.add(info.getPath().toUri().getPath());
      if (showStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      listing.addRow(row.toArray(new String[0]));
      count++;
    }
    System.out.printf("Found %d entr%s%n", count, count == 1 ? "y" : "ies");
    if (count > 0) {
      System.out.print(listing);
    }
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }
  return 0;
}
项目:FlexMap    文件:TestCacheDirectives.java   
/**
 * Verifies cache-directive expiration end to end: a future absolute
 * expiry caches blocks, a relative expiry of 0 uncaches them, and a
 * negative TTL is rejected by the NameNode.
 */
@Test(timeout=120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short)2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",
      entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",
      entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(-1)).build());
    // Fix: without failing here, the test silently passes when no
    // exception is thrown, so the rejection was never actually verified.
    fail("Expected InvalidRequestException for a negative expiration");
  } catch (InvalidRequestException e) {
    GenericTestUtils
        .assertExceptionContains("Cannot set a negative expiration", e);
  }
}
项目:hadoop-on-lustre2    文件:CacheAdmin.java   
/**
 * Lists cache directives matching the optional -path/-pool filters,
 * rendering one table row per directive; -stats adds usage columns.
 *
 * @return 0 on success, 1 on bad arguments, 2 on an RPC/IO failure.
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  CacheDirectiveInfo.Builder builder =
      new CacheDirectiveInfo.Builder();
  String pathFilter = StringUtils.popOptionWithArgument("-path", args);
  if (pathFilter != null) {
    builder.setPath(new Path(pathFilter));
  }
  String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
  if (poolFilter != null) {
    builder.setPool(poolFilter);
  }
  boolean printStats = StringUtils.popOption("-stats", args);
  if (!args.isEmpty()) {
    System.err.println("Can't understand argument: " + args.get(0));
    return 1;
  }
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("ID", Justification.RIGHT).
      addField("POOL", Justification.LEFT).
      addField("REPL", Justification.RIGHT).
      addField("EXPIRY", Justification.LEFT).
      addField("PATH", Justification.LEFT);
  if (printStats) {
    tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
                addField("BYTES_CACHED", Justification.RIGHT).
                addField("FILES_NEEDED", Justification.RIGHT).
                addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing tableListing = tableBuilder.build();
  try {
    DistributedFileSystem dfs = getDFS(conf);
    RemoteIterator<CacheDirectiveEntry> iter =
        dfs.listCacheDirectives(builder.build());
    int numEntries = 0;
    while (iter.hasNext()) {
      CacheDirectiveEntry entry = iter.next();
      CacheDirectiveInfo directive = entry.getInfo();
      CacheDirectiveStats stats = entry.getStats();
      List<String> row = new LinkedList<String>();
      row.add("" + directive.getId());
      row.add(directive.getPool());
      row.add("" + directive.getReplication());
      String expiry;
      // This is effectively never, round for nice printing
      if (directive.getExpiration().getMillis() >
          Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
        expiry = "never";
      } else {
        expiry = directive.getExpiration().toString();
      }
      row.add(expiry);
      row.add(directive.getPath().toUri().getPath());
      if (printStats) {
        row.add("" + stats.getBytesNeeded());
        row.add("" + stats.getBytesCached());
        row.add("" + stats.getFilesNeeded());
        row.add("" + stats.getFilesCached());
      }
      tableListing.addRow(row.toArray(new String[0]));
      numEntries++;
    }
    // Fix: use %n (platform line separator) instead of a hard \n inside
    // String.format, consistent with the other CacheAdmin variants.
    System.out.print(String.format("Found %d entr%s%n",
        numEntries, numEntries == 1 ? "y" : "ies"));
    if (numEntries > 0) {
      System.out.print(tableListing);
    }
  } catch (IOException e) {
    System.err.println(prettifyException(e));
    return 2;
  }
  return 0;
}
项目:hadoop-on-lustre2    文件:TestCacheDirectives.java   
/**
 * Verifies cache-directive expiration end to end: a future absolute
 * expiry caches blocks, a relative expiry of 0 uncaches them, and a
 * negative TTL is rejected by the NameNode.
 */
@Test(timeout=120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE*2, (short)2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short)2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",
      entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
      .setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",
      entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
        .setExpiration(Expiration.newRelative(-1)).build());
    // Fix: without failing here, the test silently passes when no
    // exception is thrown, so the rejection was never actually verified.
    fail("Expected InvalidRequestException for a negative expiration");
  } catch (InvalidRequestException e) {
    GenericTestUtils
        .assertExceptionContains("Cannot set a negative expiration", e);
  }
}