Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory
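
All of the snippets on this page gate NameNode operations on the NameNode.OperationCategory enum. For reference, the enum as defined in NameNode.java of the Hadoop 2.x line is essentially the following (javadoc abridged):

public static enum OperationCategory {
  /** Operations that are state agnostic */
  UNCHECKED,
  /** Read operation that does not change the namespace state */
  READ,
  /** Write operation that changes the namespace state */
  WRITE,
  /** Operations related to checkpointing */
  CHECKPOINT,
  /** Operations related to JournalProtocol */
  JOURNAL
}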

Project: hadoop    File: DelegationTokenSecretManager.java
@Override
public byte[] retrievePassword(
    DelegationTokenIdentifier identifier) throws InvalidToken {
  try {
    // This check introduces inconsistency in the authentication to an
    // HA standby NN. Non-token auths are allowed into the namespace, which
    // decides whether to throw a StandbyException. Tokens are a bit
    // different in that a standby may be behind and thus not yet know
    // of all tokens issued by the active NN. The following check does
    // not allow ANY token auth; however, it should allow known tokens in.
    namesystem.checkOperation(OperationCategory.READ);
  } catch (StandbyException se) {
    // FIXME: this is a hack to get around changing method signatures by
    // tunneling a non-InvalidToken exception as the cause which the
    // RPC server will unwrap before returning to the client
    InvalidToken wrappedStandby = new InvalidToken("StandbyException");
    wrappedStandby.initCause(se);
    throw wrappedStandby;
  }
  return super.retrievePassword(identifier);
}
Project: hadoop    File: DelegationTokenSecretManager.java
@Override
public byte[] retriableRetrievePassword(DelegationTokenIdentifier identifier)
    throws InvalidToken, StandbyException, RetriableException, IOException {
  namesystem.checkOperation(OperationCategory.READ);
  try {
    return super.retrievePassword(identifier);
  } catch (InvalidToken it) {
    if (namesystem.inTransitionToActive()) {
      // If the namesystem is currently in the middle of a transition to
      // active state, let the client retry since the corresponding edit log
      // may not have been applied yet.
      throw new RetriableException(it);
    } else {
      throw it;
    }
  }
}
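
The checkOperation(OperationCategory.READ) calls above are what make token retrieval HA-aware: FSNamesystem delegates the check to the current HA state, and a standby rejects every category except UNCHECKED (and READ, when stale reads are allowed). A sketch of that check, closely following StandbyState in the Hadoop source:

// Sketch of StandbyState.checkOperation, closely following the Hadoop source.
public void checkOperation(HAContext context, OperationCategory op)
    throws StandbyException {
  if (op == OperationCategory.UNCHECKED ||
      (op == OperationCategory.READ && context.allowStaleReads())) {
    return;  // permitted on a standby NameNode
  }
  throw new StandbyException("Operation category " + op
      + " is not supported in state " + context.getState());
}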
Project: hadoop    File: FSNamesystem.java
/**
 * Dump all metadata into the specified file.
 */
void metaSave(String filename) throws IOException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  writeLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    File file = new File(System.getProperty("hadoop.log.dir"), filename);
    PrintWriter out = new PrintWriter(new BufferedWriter(
        new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8)));
    metaSave(out);
    out.flush();
    out.close();
  } finally {
    writeUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Set permissions for an existing file.
 * @throws IOException
 */
void setPermission(String src, FsPermission permission) throws IOException {
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set permission for " + src);
    auditStat = FSDirAttrOp.setPermission(dir, src, permission);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setPermission", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setPermission", src, null, auditStat);
}
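
Note the pattern that repeats in every FSNamesystem method on this page: checkOperation is called once before taking the namesystem lock, to fail fast on a standby without contending for the lock, and again after acquiring it, because a failover may have happened while the thread was blocked. A minimal skeleton of that pattern; writeOp and doWriteOperation are hypothetical placeholders, not Hadoop APIs:

// Minimal sketch of the recurring FSNamesystem write pattern.
// writeOp and doWriteOperation are hypothetical placeholders.
void writeOp(String src) throws IOException {
  checkOperation(OperationCategory.WRITE);   // fail fast if this NN is a standby
  writeLock();
  try {
    // Re-check under the lock: the HA state may have changed while blocked.
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot modify " + src);
    doWriteOperation(src);                   // hypothetical namespace mutation
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();                    // flush edits outside the lock
  logAuditEvent(true, "writeOp", src);
}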
Project: hadoop    File: FSNamesystem.java
/**
 * Set owner for an existing file.
 * @throws IOException
 */
void setOwner(String src, String username, String group)
    throws IOException {
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set owner for " + src);
    auditStat = FSDirAttrOp.setOwner(dir, src, username, group);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setOwner", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setOwner", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Moves all the blocks from {@code srcs} and appends them to {@code target}.
 * To avoid rollbacks we will verify the validity of ALL of the args
 * before we start the actual move.
 * 
 * This does not support ".inodes" relative paths.
 * @param target target to concat into
 * @param srcs files that will be concatenated
 * @throws IOException on error
 */
void concat(String target, String [] srcs, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  waitForLoadingFSImage();
  HdfsFileStatus stat = null;
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot concat " + target);
    stat = FSDirConcatOp.concat(dir, target, srcs, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    if (success) {
      getEditLog().logSync();
    }
    logAuditEvent(success, "concat", Arrays.toString(srcs), target, stat);
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Stores the modification and access time for this inode.
 * The access time is precise up to an hour. The transaction, if needed, is
 * written to the edits log but is not flushed.
 */
void setTimes(String src, long mtime, long atime) throws IOException {
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set times " + src);
    auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setTimes", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setTimes", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Create a symbolic link.
 */
@SuppressWarnings("deprecation")
void createSymlink(String target, String link,
    PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
    throws IOException {
  if (!FileSystem.areSymlinksEnabled()) {
    throw new UnsupportedOperationException("Symlinks not supported");
  }
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create symlink " + link);
    auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
                                                createParent, logRetryCache);
  } catch (AccessControlException e) {
    logAuditEvent(false, "createSymlink", link, target, null);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "createSymlink", link, target, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Set replication for an existing file.
 * 
 * The NameNode sets new replication and schedules either replication of 
 * under-replicated data blocks or removal of the excessive block copies 
 * if the blocks are over-replicated.
 * 
 * @see ClientProtocol#setReplication(String, short)
 * @param src file name
 * @param replication new replication
 * @return true if successful; 
 *         false if file does not exist or is a directory
 */
boolean setReplication(final String src, final short replication)
    throws IOException {
  boolean success = false;
  waitForLoadingFSImage();
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set replication for " + src);
    success = FSDirAttrOp.setReplication(dir, blockManager, src, replication);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setReplication", src);
    throw e;
  } finally {
    writeUnlock();
  }
  if (success) {
    getEditLog().logSync();
    logAuditEvent(true, "setReplication", src);
  }
  return success;
}
Project: hadoop    File: FSNamesystem.java
/**
 * Set the storage policy for a file or a directory.
 *
 * @param src file/directory path
 * @param policyName storage policy name
 */
void setStoragePolicy(String src, String policyName) throws IOException {
  HdfsFileStatus auditStat;
  waitForLoadingFSImage();
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set storage policy for " + src);
    auditStat = FSDirAttrOp.setStoragePolicy(
        dir, blockManager, src, policyName);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setStoragePolicy", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setStoragePolicy", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
/** 
 * Change the indicated filename. 
 * @deprecated Use {@link #renameTo(String, String, boolean,
 * Options.Rename...)} instead.
 */
@Deprecated
boolean renameTo(String src, String dst, boolean logRetryCache)
    throws IOException {
  waitForLoadingFSImage();
  checkOperation(OperationCategory.WRITE);
  FSDirRenameOp.RenameOldResult ret = null;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot rename " + src);
    ret = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache);
  } catch (AccessControlException e)  {
    logAuditEvent(false, "rename", src, dst, null);
    throw e;
  } finally {
    writeUnlock();
  }
  boolean success = ret != null && ret.success;
  if (success) {
    getEditLog().logSync();
  }
  logAuditEvent(success, "rename", src, dst,
      ret == null ? null : ret.auditStat);
  return success;
}
Project: hadoop    File: FSNamesystem.java
/**
 * Get the file info for a specific file.
 *
 * @param src The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException
 *        if src refers to a symlink
 *
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if a symlink is encountered.
 *
 * @return object containing information regarding the file
 *         or null if file not found
 * @throws StandbyException
 */
HdfsFileStatus getFileInfo(final String src, boolean resolveLink)
  throws IOException {
  checkOperation(OperationCategory.READ);
  HdfsFileStatus stat = null;
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    stat = FSDirStatAndListingOp.getFileInfo(dir, src, resolveLink);
  } catch (AccessControlException e) {
    logAuditEvent(false, "getfileinfo", src);
    throw e;
  } finally {
    readUnlock();
  }
  logAuditEvent(true, "getfileinfo", src);
  return stat;
}
Project: hadoop    File: FSNamesystem.java
/**
 * Create all the necessary directories
 */
boolean mkdirs(String src, PermissionStatus permissions,
    boolean createParent) throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create directory " + src);
    auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
  } catch (AccessControlException e) {
    logAuditEvent(false, "mkdirs", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "mkdirs", src, null, auditStat);
  return true;
}
Project: hadoop    File: FSNamesystem.java
/**
 * Set the namespace quota and storage space quota for a directory.
 * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the
 * contract.
 * 
 * Note: This does not support ".inodes" relative paths.
 */
void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set quota on " + src);
    FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type);
    success = true;
  } finally {
    writeUnlock();
    if (success) {
      getEditLog().logSync();
    }
    logAuditEvent(success, "setQuota", src);
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Get a partial listing of the indicated directory
 *
 * @param src the directory name
 * @param startAfter the name to start after
 * @param needLocation if blockLocations need to be returned
 * @return a partial listing starting after startAfter
 * 
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if symbolic link is encountered
 * @throws IOException if other I/O error occurred
 */
DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) 
    throws IOException {
  checkOperation(OperationCategory.READ);
  DirectoryListing dl = null;
  readLock();
  try {
    checkOperation(NameNode.OperationCategory.READ);
    dl = FSDirStatAndListingOp.getListingInt(dir, src, startAfter,
        needLocation);
  } catch (AccessControlException e) {
    logAuditEvent(false, "listStatus", src);
    throw e;
  } finally {
    readUnlock();
  }
  logAuditEvent(true, "listStatus", src);
  return dl;
}
Project: hadoop    File: FSNamesystem.java
DatanodeInfo[] datanodeReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();      
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i=0; i<arr.length; i++) {
      arr[i] = new DatanodeInfo(results.get(i));
    }
    return arr;
  } finally {
    readUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
    ) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();      
    final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);

    DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
    for (int i = 0; i < reports.length; i++) {
      final DatanodeDescriptor d = datanodes.get(i);
      reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
          d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Save namespace image.
 * This will save the current namespace into the fsimage file and empty the edits file.
 * Requires superuser privilege and safe mode.
 * 
 * @throws AccessControlException if superuser privilege is violated.
 * @throws IOException if an I/O error occurs.
 */
void saveNamespace() throws AccessControlException, IOException {
  checkOperation(OperationCategory.UNCHECKED);
  checkSuperuserPrivilege();

  cpLock();  // Block if a checkpointing is in progress on standby.
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);

    if (!isInSafeMode()) {
      throw new IOException("Safe mode should be turned ON "
          + "in order to create namespace image.");
    }
    getFSImage().saveNamespace(this);
  } finally {
    readUnlock();
    cpUnlock();
  }
  LOG.info("New namespace image has been created");
}
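
saveNamespace() refuses to run outside safe mode, so an administrative client has to enter safe mode first. A sketch using the public DistributedFileSystem API, assuming a Hadoop 2.x client with superuser credentials and fs.defaultFS pointing at the NameNode:

// Sketch: saving the namespace from an admin client (assumes Hadoop 2.x,
// superuser credentials, and fs.defaultFS pointing at the NameNode).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SaveNamespaceExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);  // required by saveNamespace()
    try {
      dfs.saveNamespace();  // ends up in FSNamesystem.saveNamespace() above
    } finally {
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
  }
}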
Project: hadoop    File: FSNamesystem.java
/**
 * Enables/Disables/Checks restoring failed storage replicas if the storage becomes available again.
 * Requires superuser privilege.
 * 
 * @throws AccessControlException if superuser privilege is violated.
 */
boolean restoreFailedStorage(String arg) throws AccessControlException,
    StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  cpLock();  // Block if a checkpointing is in progress on standby.
  writeLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);

    // if it is disabled - enable it and vice versa.
    if(arg.equals("check"))
      return getFSImage().getStorage().getRestoreFailedStorage();

    boolean val = arg.equals("true");  // false if not
    getFSImage().getStorage().setRestoreFailedStorage(val);

    return val;
  } finally {
    writeUnlock();
    cpUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
NamenodeCommand startCheckpoint(NamenodeRegistration backupNode,
    NamenodeRegistration activeNamenode) throws IOException {
  checkOperation(OperationCategory.CHECKPOINT);
  writeLock();
  try {
    checkOperation(OperationCategory.CHECKPOINT);
    checkNameNodeSafeMode("Checkpoint not started");

    LOG.info("Start checkpoint for " + backupNode.getAddress());
    NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode,
        activeNamenode);
    getEditLog().logSync();
    return cmd;
  } finally {
    writeUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Client is reporting some bad block locations.
 */
void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
  checkOperation(OperationCategory.WRITE);
  NameNode.stateChangeLog.info("*DIR* reportBadBlocks");
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    for (int i = 0; i < blocks.length; i++) {
      ExtendedBlock blk = blocks[i].getBlock();
      DatanodeInfo[] nodes = blocks[i].getLocations();
      String[] storageIDs = blocks[i].getStorageIDs();
      for (int j = 0; j < nodes.length; j++) {
        blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
            storageIDs == null ? null: storageIDs[j], 
            "client machine reported it");
      }
    }
  } finally {
    writeUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Release (unregister) backup node.
 * <p>
 * Find and remove the backup stream corresponding to the node.
 * @throws IOException
 */
void releaseBackupNode(NamenodeRegistration registration)
  throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    if(getFSImage().getStorage().getNamespaceID()
       != registration.getNamespaceID())
      throw new IOException("Incompatible namespaceIDs: "
          + " Namenode namespaceID = "
          + getFSImage().getStorage().getNamespaceID() + "; "
          + registration.getRole() +
          " node namespaceID = " + registration.getNamespaceID());
    getEditLog().releaseBackupStream(registration);
  } finally {
    writeUnlock();
  }
}
Project: hadoop    File: FSNamesystem.java
/**
 * Cancel a delegation token.
 * @param token token to cancel
 * @throws IOException on error
 */
void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    checkNameNodeSafeMode("Cannot cancel delegation token");
    String canceller = getRemoteUser().getUserName();
    DelegationTokenIdentifier id = dtSecretManager
      .cancelToken(token, canceller);
    getEditLog().logCancelDelegationToken(id);
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
}
Project: hadoop    File: FSNamesystem.java
/** Allow snapshot on a directory. */
void allowSnapshot(String path) throws IOException {
  checkOperation(OperationCategory.WRITE);
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot allow snapshot for " + path);
    checkSuperuserPrivilege();
    FSDirSnapshotOp.allowSnapshot(dir, snapshotManager, path);
    success = true;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(success, "allowSnapshot", path, null, null);
}
Project: hadoop    File: FSNamesystem.java
/** Disallow snapshot on a directory. */
void disallowSnapshot(String path) throws IOException {
  checkOperation(OperationCategory.WRITE);
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot disallow snapshot for " + path);
    checkSuperuserPrivilege();
    FSDirSnapshotOp.disallowSnapshot(dir, snapshotManager, path);
    success = true;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(success, "disallowSnapshot", path, null, null);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Create a snapshot
 * @param snapshotRoot The directory path where the snapshot is taken
 * @param snapshotName The name of the snapshot
 */
String createSnapshot(String snapshotRoot, String snapshotName,
                      boolean logRetryCache) throws IOException {
  String snapshotPath = null;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create snapshot for " + snapshotRoot);
    snapshotPath = FSDirSnapshotOp.createSnapshot(dir,
        snapshotManager, snapshotRoot, snapshotName, logRetryCache);
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(snapshotPath != null, "createSnapshot", snapshotRoot,
      snapshotPath, null);
  return snapshotPath;
}
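
The allowSnapshot/createSnapshot methods above are reached through the client-side snapshot API. A hedged usage sketch; the directory path and snapshot name are illustrative:

// Sketch: taking a snapshot from a client (path and name are illustrative).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/data/reports");       // illustrative path
    dfs.allowSnapshot(dir);                     // superuser only; see allowSnapshot()
    Path snap = dfs.createSnapshot(dir, "s1");  // reaches createSnapshot() above
    System.out.println("Created snapshot at " + snap);
  }
}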
Project: hadoop    File: FSNamesystem.java
/**
 * Rename a snapshot
 * @param path The directory path where the snapshot was taken
 * @param snapshotOldName Old snapshot name
 * @param snapshotNewName New snapshot name
 * @throws SafeModeException
 * @throws IOException 
 */
void renameSnapshot(
    String path, String snapshotOldName, String snapshotNewName,
    boolean logRetryCache) throws IOException {
  checkOperation(OperationCategory.WRITE);
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot rename snapshot for " + path);
    FSDirSnapshotOp.renameSnapshot(dir, snapshotManager, path,
        snapshotOldName, snapshotNewName, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName);
  String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName);
  logAuditEvent(success, "renameSnapshot", oldSnapshotRoot,
      newSnapshotRoot, null);
}
Project: hadoop    File: FSNamesystem.java
/**
 * Get the list of snapshottable directories that are owned 
 * by the current user. Return all the snapshottable directories if the 
 * current user is a super user.
 * @return The list of all the current snapshottable directories
 * @throws IOException
 */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
    throws IOException {
  SnapshottableDirectoryStatus[] status = null;
  checkOperation(OperationCategory.READ);
  boolean success = false;
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    status = FSDirSnapshotOp.getSnapshottableDirListing(dir, snapshotManager);
    success = true;
  } finally {
    readUnlock();
  }
  logAuditEvent(success, "listSnapshottableDirectory", null, null, null);
  return status;
}
Project: hadoop    File: FSNamesystem.java
/**
 * Get the difference between two snapshots (or between a snapshot and the
 * current status) of a snapshottable directory.
 * 
 * @param path The full path of the snapshottable directory.
 * @param fromSnapshot Name of the snapshot to calculate the diff from. Null
 *          or empty string indicates the current tree.
 * @param toSnapshot Name of the snapshot to calculate the diff to. Null or
 *          empty string indicates the current tree.
 * @return A report about the difference between {@code fromSnapshot} and 
 *         {@code toSnapshot}. Modified/deleted/created/renamed files and 
 *         directories belonging to the snapshottable directories are listed 
 *         and labeled as M/-/+/R respectively. 
 * @throws IOException
 */
SnapshotDiffReport getSnapshotDiffReport(String path,
    String fromSnapshot, String toSnapshot) throws IOException {
  SnapshotDiffReport diffs = null;
  checkOperation(OperationCategory.READ);
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    diffs = FSDirSnapshotOp.getSnapshotDiffReport(dir, snapshotManager,
        path, fromSnapshot, toSnapshot);
  } finally {
    readUnlock();
  }

  logAuditEvent(diffs != null, "computeSnapshotDiff", null, null, null);
  return diffs;
}
Project: hadoop    File: FSNamesystem.java
void removeCacheDirective(long id, boolean logRetryCache) throws IOException {
  checkOperation(OperationCategory.WRITE);
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot remove cache directives", safeMode);
    }
    FSNDNCacheOp.removeCacheDirective(this, cacheManager, id, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    String idStr = "{id: " + Long.toString(id) + "}";
    logAuditEvent(success, "removeCacheDirective", idStr, null,
        null);
  }
  getEditLog().logSync();
}
Project: hadoop    File: FSNamesystem.java
BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(
    long startId, CacheDirectiveInfo filter) throws IOException {
  checkOperation(OperationCategory.READ);
  BatchedListEntries<CacheDirectiveEntry> results;
  cacheManager.waitForRescanIfNeeded();
  readLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.READ);
    results = FSNDNCacheOp.listCacheDirectives(this, cacheManager, startId,
        filter);
    success = true;
  } finally {
    readUnlock();
    logAuditEvent(success, "listCacheDirectives", filter.toString(), null,
        null);
  }
  return results;
}
Project: hadoop    File: FSNamesystem.java
void addCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  String poolInfoStr = null;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot add cache pool " + req.getPoolName(), safeMode);
    }
    CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req,
        logRetryCache);
    poolInfoStr = info.toString();
    success = true;
  } finally {
    writeUnlock();
    logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
  }

  getEditLog().logSync();
}
Project: hadoop    File: FSNamesystem.java
void modifyCachePool(CachePoolInfo req, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot modify cache pool " + req.getPoolName(), safeMode);
    }
    FSNDNCacheOp.modifyCachePool(this, cacheManager, req, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    String poolNameStr = "{poolName: " +
        (req == null ? null : req.getPoolName()) + "}";
    logAuditEvent(success, "modifyCachePool", poolNameStr,
                  req == null ? null : req.toString(), null);
  }

  getEditLog().logSync();
}
Project: hadoop    File: FSNamesystem.java
void removeCachePool(String cachePoolName, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot remove cache pool " + cachePoolName, safeMode);
    }
    FSNDNCacheOp.removeCachePool(this, cacheManager, cachePoolName,
        logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    String poolNameStr = "{poolName: " + cachePoolName + "}";
    logAuditEvent(success, "removeCachePool", poolNameStr, null, null);
  }

  getEditLog().logSync();
}
Project: hadoop    File: FSNamesystem.java
BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
    throws IOException {
  BatchedListEntries<CachePoolEntry> results;
  checkOperation(OperationCategory.READ);
  boolean success = false;
  cacheManager.waitForRescanIfNeeded();
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    results = FSNDNCacheOp.listCachePools(this, cacheManager, prevKey);
    success = true;
  } finally {
    readUnlock();
    logAuditEvent(success, "listCachePools", null, null, null);
  }
  return results;
}
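
The cache pool and cache directive methods above are driven by the client-side centralized caching API. A hedged sketch of creating a pool and a directive; the pool name and path are illustrative:

// Sketch: creating a cache pool and directive (pool name and path illustrative).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachingExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    dfs.addCachePool(new CachePoolInfo("hot"));   // reaches addCachePool() above
    long id = dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder()
            .setPath(new Path("/data/hot/table")) // illustrative path
            .setPool("hot")
            .build());
    System.out.println("Added cache directive " + id);
  }
}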
Project: hadoop    File: FSNamesystem.java
void modifyAclEntries(final String src, List<AclEntry> aclSpec)
    throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
    auditStat = FSDirAclOp.modifyAclEntries(dir, src, aclSpec);
  } catch (AccessControlException e) {
    logAuditEvent(false, "modifyAclEntries", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "modifyAclEntries", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
void removeAclEntries(final String src, List<AclEntry> aclSpec)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  HdfsFileStatus auditStat = null;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
    auditStat = FSDirAclOp.removeAclEntries(dir, src, aclSpec);
  } catch (AccessControlException e) {
    logAuditEvent(false, "removeAclEntries", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "removeAclEntries", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
void removeDefaultAcl(final String src) throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
    auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
  } catch (AccessControlException e) {
    logAuditEvent(false, "removeDefaultAcl", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "removeDefaultAcl", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
void removeAcl(final String src) throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove ACL on " + src);
    auditStat = FSDirAclOp.removeAcl(dir, src);
  } catch (AccessControlException e) {
    logAuditEvent(false, "removeAcl", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "removeAcl", src, null, auditStat);
}
Project: hadoop    File: FSNamesystem.java
void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set ACL on " + src);
    auditStat = FSDirAclOp.setAcl(dir, src, aclSpec);
  } catch (AccessControlException e) {
    logAuditEvent(false, "setAcl", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  logAuditEvent(true, "setAcl", src, null, auditStat);
}
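
The ACL methods above are reached through the FileSystem ACL API introduced in Hadoop 2.4. A hedged sketch that merges one entry into an existing ACL; the user, path, and permissions are illustrative:

// Sketch: granting a user read/execute via ACLs (user and path illustrative).
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class ModifyAclExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    List<AclEntry> aclSpec = Arrays.asList(
        AclEntry.newBuilder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("alice")                     // illustrative user
            .setPermission(FsAction.READ_EXECUTE)
            .build());
    // modifyAclEntries merges the given entries into the existing ACL,
    // reaching FSNamesystem.modifyAclEntries() above.
    fs.modifyAclEntries(new Path("/data/shared"), aclSpec);
  }
}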