Java class org.apache.hadoop.hdfs.DFSUtil example source code
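
The snippets below are collected from the Apache Hadoop code base and show typical uses of the org.apache.hadoop.hdfs.DFSUtil helper class. Many of them rely on DFSUtil's conversions of path components between String and byte[]; a minimal round-trip sketch, assuming the Hadoop 2.x DFSUtil API used throughout this page:

import org.apache.hadoop.hdfs.DFSUtil;

public class DFSUtilStringBytesExample {
  public static void main(String[] args) {
    // string2Bytes/bytes2String convert a path component between String and UTF-8 bytes.
    byte[] name = DFSUtil.string2Bytes("foo");
    String back = DFSUtil.bytes2String(name); // "foo"
    System.out.println(back);
  }
}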

Project: hadoop    File: Nfs3HttpServer.java
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);

  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);

  this.httpServer = builder.build();
  this.httpServer.start();

  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }

  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
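
Note: DFSUtil.getHttpPolicy(conf) reads the dfs.http.policy setting, and the number of connectors created above (hence the connector index used for infoPort/infoSecurePort) depends on that value. A short sketch, assuming the standard Hadoop 2.x policy values and the DFS_HTTP_POLICY_KEY constant:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpConfig;

public class HttpPolicyExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // HTTP_ONLY and HTTPS_ONLY are the other accepted values.
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTP_AND_HTTPS");
    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    System.out.println(policy.isHttpEnabled() + " " + policy.isHttpsEnabled()); // true true
  }
}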
Project: hadoop    File: TestReplicationPolicy.java
/**
 * This test case verifies that the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and that an IllegalArgumentException is thrown
 * when a non-positive value is configured.
 */
@Test
public void testGetReplWorkMultiplier() {
  Configuration conf = new Configuration();
  int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertTrue(blocksReplWorkMultiplier > 0);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertEquals(blocksReplWorkMultiplier, 3);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}
Project: hadoop    File: JspHelper.java
private static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom)
    throws IOException {
  if (nodes == null || nodes.length == 0) {
    throw new IOException("No nodes contain this block");
  }
  int l = 0;
  while (l < nodes.length && !nodes[l].isDecommissioned()) {
    ++l;
  }

  if (l == 0) {
    throw new IOException("No active nodes contain this block");
  }

  int index = doRandom ? DFSUtil.getRandom().nextInt(l) : 0;
  return nodes[index];
}
Project: hadoop    File: TestINodeFile.java
/**
 * Creates the required number of files with one block each.
 * @param nCount Number of INodes to create
 * @param fileNamePrefix Prefix for the generated file names
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte)0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: hadoop    File: FSDirStatAndListingOp.java
/**
 * Get the file info for a specific file.
 *
 * @param srcArg The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException
 *        if src refers to a symlink
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  String src = srcArg;
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, null, null, null, false);
    isSuperUser = pc.isSuperUser();
  }
  return getFileInfo(fsd, src, resolveLink,
      FSDirectory.isReservedRawName(srcArg), isSuperUser);
}
Project: hadoop    File: TestByteArrayManager.java
Future<byte[]> submitAllocate() {
  count.incrementAndGet();

  final Future<byte[]> f = pool.submit(new Callable<byte[]>() {
    @Override
    public byte[] call() throws Exception {
      final int lower = maxArrayLength == ByteArrayManager.MIN_ARRAY_LENGTH?
          0: maxArrayLength >> 1;
      final int arrayLength = DFSUtil.getRandom().nextInt(
          maxArrayLength - lower) + lower + 1;
      final byte[] array = bam.newByteArray(arrayLength);
      try {
        Assert.assertEquals("arrayLength=" + arrayLength + ", lower=" + lower,
            maxArrayLength, array.length);
      } catch(AssertionError e) {
        assertionErrors.add(e);
      }
      return array;
    }
  });
  synchronized (arrays) {
    arrays.add(f);
  }
  return f;
}
Project: hadoop    File: TestJsonUtil.java
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
Project: hadoop    File: BackupNode.java
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while(!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch(SocketTimeoutException e) {  // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        LOG.warn("Encountered exception ", e);
      }
    }
  }
  return nsInfo;
}
Project: hadoop    File: FSDirectory.java
/**
 * Verify child's name for fs limit.
 *
 * @param childName byte[] containing new child name
 * @param parentPath String containing parent path
 * @throws PathComponentTooLongException child's name is too long.
 */
void verifyMaxComponentLength(byte[] childName, String parentPath)
    throws PathComponentTooLongException {
  if (maxComponentLength == 0) {
    return;
  }

  final int length = childName.length;
  if (length > maxComponentLength) {
    final PathComponentTooLongException e = new PathComponentTooLongException(
        maxComponentLength, length, parentPath,
        DFSUtil.bytes2String(childName));
    if (namesystem.isImageLoaded()) {
      throw e;
    } else {
      // Do not throw if edits log is still being processed
      NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
    }
  }
}
Project: hadoop    File: PBHelper.java
public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
  if (entry == null) {
    return null;
  }
  ByteString sourcePath = ByteString
      .copyFrom(entry.getSourcePath() == null ? DFSUtil.EMPTY_BYTES : entry
          .getSourcePath());
  String modification = entry.getType().getLabel();
  SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
      .newBuilder().setFullpath(sourcePath)
      .setModificationLabel(modification);
  if (entry.getType() == DiffType.RENAME) {
    ByteString targetPath = ByteString
        .copyFrom(entry.getTargetPath() == null ? DFSUtil.EMPTY_BYTES : entry
            .getTargetPath());
    builder.setTargetPath(targetPath);
  }
  return builder.build();
}
Project: hadoop    File: BPServiceActor.java
/**
 * Schedule the next block report after the block report interval. If the
 * current block report was delayed then the next block report is sent per
 * the original schedule.
 * Numerical overflow is possible here.
 */
void scheduleNextBlockReport() {
  // If we have sent the first set of block reports, then wait a random
  // time before we start the periodic block reports.
  if (resetBlockReportTime) {
    nextBlockReportTime = monotonicNow() +
        DFSUtil.getRandom().nextInt((int)(blockReportIntervalMs));
    resetBlockReportTime = false;
  } else {
    /* say the last block report was at 8:20:14. The current report
     * should have started around 9:20:14 (default 1 hour interval).
     * If current time is :
     *   1) normal like 9:20:18, next report should be at 10:20:14
     *   2) unexpected like 11:35:43, next report should be at 12:20:14
     */
    nextBlockReportTime +=
          (((monotonicNow() - nextBlockReportTime + blockReportIntervalMs) /
              blockReportIntervalMs)) * blockReportIntervalMs;
  }
}
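
Note: the increment above advances nextBlockReportTime by a whole number of intervals so that it lands on the first scheduled slot after "now". A worked example with invented values, just to illustrate the integer arithmetic:

public class BlockReportScheduleExample {
  public static void main(String[] args) {
    // Illustration only; the values are invented.
    long intervalMs = 3600000L;                          // 1 hour
    long scheduled  = 1000000L;                          // previously scheduled report time
    long now        = scheduled + 2 * intervalMs + 123;  // the report ran late
    scheduled += ((now - scheduled + intervalMs) / intervalMs) * intervalMs;
    // (2*intervalMs + 123 + intervalMs) / intervalMs == 3, so the schedule advances
    // by three whole intervals and lands on the first slot strictly after "now".
    System.out.println(scheduled > now); // true
  }
}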
Project: hadoop    File: NameNode.java
/**
 * Command-line entry point for the NameNode.
 */
public static void main(String argv[]) throws Exception {
  if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
    NameNode namenode = createNameNode(argv, null);
    if (namenode != null) {
      namenode.join();
    }
  } catch (Throwable e) {
    LOG.error("Failed to start namenode.", e);
    terminate(1, e);
  }
}
Project: hadoop    File: MiniQJMHACluster.java
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
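
Note: DFSUtil.addKeySuffixes joins a base configuration key and its suffixes with dots, which is how the per-nameservice and per-namenode keys above are built. A minimal sketch (the nameservice and namenode ids are made up):

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;

public class AddKeySuffixesExample {
  public static void main(String[] args) {
    String key = DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1");
    System.out.println(key); // dfs.namenode.rpc-address.ns1.nn1
  }
}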
Project: hadoop    File: PBHelper.java
public static SnapshottableDirectoryStatusProto convert(
    SnapshottableDirectoryStatus status) {
  if (status == null) {
    return null;
  }
  int snapshotNumber = status.getSnapshotNumber();
  int snapshotQuota = status.getSnapshotQuota();
  byte[] parentFullPath = status.getParentFullPath();
  ByteString parentFullPathBytes = ByteString.copyFrom(
      parentFullPath == null ? DFSUtil.EMPTY_BYTES : parentFullPath);
  HdfsFileStatusProto fs = convert(status.getDirStatus());
  SnapshottableDirectoryStatusProto.Builder builder = 
      SnapshottableDirectoryStatusProto
      .newBuilder().setSnapshotNumber(snapshotNumber)
      .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
      .setDirStatus(fs);
  return builder.build();
}
Project: hadoop    File: FSImage.java
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
Project: hadoop    File: TestByteArrayManager.java
@Override
public void run() {
  for(int i = 0; i < n; i++) {
    final boolean isAllocate = DFSUtil.getRandom().nextInt(NUM_RUNNERS) < p;
    if (isAllocate) {
      submitAllocate();
    } else {
      try {
        final Future<byte[]> f = removeFirst();
        if (f != null) {
          submitRecycle(f.get());
        }
      } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(this + " has " + e);
      }
    }

    if ((i & 0xFF) == 0) {
      sleepMs(100);
    }
  }
}
Project: hadoop    File: TestSnapshotDiffReport.java
/**
 * Rename a file and then append some data to it
 */
@Test
public void testDiffReportWithRenameAndAppend() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  final Path bar = new Path(root, "bar");
  hdfs.rename(foo, bar);
  DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");

  // we always put modification on the file before rename
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
          DFSUtil.string2Bytes("bar")));
}
Project: hadoop    File: INodesInPath.java
private String toString(boolean validateObject) {
  if (validateObject) {
    validate();
  }

  final StringBuilder b = new StringBuilder(getClass().getSimpleName())
      .append(": path = ").append(DFSUtil.byteArray2PathString(path))
      .append("\n  inodes = ");
  if (inodes == null) {
    b.append("null");
  } else if (inodes.length == 0) {
    b.append("[]");
  } else {
    b.append("[").append(toString(inodes[0]));
    for(int i = 1; i < inodes.length; i++) {
      b.append(", ").append(toString(inodes[i]));
    }
    b.append("], length=").append(inodes.length);
  }
  b.append("\n  isSnapshot        = ").append(isSnapshot)
   .append("\n  snapshotId        = ").append(snapshotId);
  return b.toString();
}
Project: hadoop    File: FSImageFormat.java
private static void setRenameReservedMapInternal(String renameReserved) {
  Collection<String> pairs =
      StringUtils.getTrimmedStringCollection(renameReserved);
  for (String p : pairs) {
    String[] pair = StringUtils.split(p, '/', '=');
    Preconditions.checkArgument(pair.length == 2,
        "Could not parse key-value pair " + p);
    String key = pair[0];
    String value = pair[1];
    Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key),
        "Unknown reserved path " + key);
    Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value),
        "Invalid rename path for " + key + ": " + value);
    LOG.info("Will rename reserved path " + key + " to " + value);
    renameReservedMap.put(key, value);
  }
}
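
Note: the argument parsed above is a comma-separated list of key=value pairs mapping a reserved path component to its replacement (the string supplied with the NameNode's -renameReserved upgrade option). A standalone sketch of the same parsing on an illustrative input; the replacement names are arbitrary:

import org.apache.hadoop.util.StringUtils;

public class RenameReservedFormatExample {
  public static void main(String[] args) {
    String renameReserved = ".snapshot=.user-snapshot,.reserved=.my-reserved";
    for (String p : StringUtils.getTrimmedStringCollection(renameReserved)) {
      String[] pair = StringUtils.split(p, '/', '=');   // '/' is the escape character
      System.out.println(pair[0] + " -> " + pair[1]);   // .snapshot -> .user-snapshot, ...
    }
  }
}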
Project: hadoop    File: FSImageFormat.java
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support inode IDs, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
    if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
          RESERVED_ERROR_MSG);
      final String renameString = renameReservedMap
          .get(FSDirectory.DOT_RESERVED_STRING);
      component =
          DFSUtil.string2Bytes(renameString);
      LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
          + " to " + renameString);
    }
  }
  return component;
}
Project: hadoop    File: FSImageTestUtil.java
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();

  PermissionStatus perms = PermissionStatus.createImmutable("fakeuser", "fakegroup",
      FsPermission.createImmutable((short)0755));
  for (int i = 1; i <= numDirs; i++) {
    String dirName = "dir" + i;
    INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
Project: hadoop    File: Nfs3HttpServer.java
/**
 * Return the URI that locates the HTTP server.
 */
public URI getServerURI() {
  // getHttpClientScheme() only returns https for HTTPS_ONLY policy. This
  // matches the behavior that the first connector is a HTTPS connector only
  // for HTTPS_ONLY policy.
  InetSocketAddress addr = httpServer.getConnectorAddress(0);
  return URI.create(DFSUtil.getHttpClientScheme(conf) + "://"
      + NetUtils.getHostPortString(addr));
}
Project: hadoop    File: JsonUtil.java
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  final Map<?, ?> m = includesType ? 
      (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  final byte[] symlink = type != PathType.SYMLINK? null
      : DFSUtil.string2Bytes((String)m.get("symlink"));

  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
    (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
Project: hadoop    File: TestStorageMover.java
private void runMover() throws Exception {
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  Map<URI, List<Path>> nnMap = Maps.newHashMap();
  for (URI nn : namenodes) {
    nnMap.put(nn, null);
  }
  int result = Mover.run(nnMap, conf);
  Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
}
Project: hadoop    File: WebHdfsFileSystem.java
@Override
public synchronized void setWorkingDirectory(final Path dir) {
  String result = makeAbsolute(dir).toUri().getPath();
  if (!DFSUtil.isValidName(result)) {
    throw new IllegalArgumentException("Invalid DFS directory name " + 
                                       result);
  }
  workingDir = makeAbsolute(dir);
}
Project: hadoop    File: WebHdfsFileSystem.java
@Override
public BlockLocation[] getFileBlockLocations(final Path p, 
    final long offset, final long length) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
  return new FsPathResponseRunner<BlockLocation[]>(op, p,
      new OffsetParam(offset), new LengthParam(length)) {
    @Override
    BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
      return DFSUtil.locatedBlocks2Locations(
          JsonUtil.toLocatedBlocks(json));
    }
  }.run();
}
Project: hadoop    File: CacheAdmin.java
private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)
    throws IOException {
  CacheDirectiveInfo.Expiration ex = null;
  if (ttlString != null) {
    if (ttlString.equalsIgnoreCase("never")) {
      ex = CacheDirectiveInfo.Expiration.NEVER;
    } else {
      long ttl = DFSUtil.parseRelativeTime(ttlString);
      ex = CacheDirectiveInfo.Expiration.newRelative(ttl);
    }
  }
  return ex;
}
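
Note: DFSUtil.parseRelativeTime accepts a number followed by a unit suffix (s, m, h or d) and returns the duration in milliseconds; the literal "never" is handled separately above. A short sketch, assuming that behaviour:

import org.apache.hadoop.hdfs.DFSUtil;

public class ParseRelativeTimeExample {
  public static void main(String[] args) throws Exception {
    System.out.println(DFSUtil.parseRelativeTime("30m")); // 1800000 (30 minutes)
    System.out.println(DFSUtil.parseRelativeTime("2d"));  // 172800000 (2 days)
  }
}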
Project: hadoop    File: TestByteArrayManager.java
@Test
public void testCounter() throws Exception {
  final long countResetTimePeriodMs = 200L;
  final Counter c = new Counter(countResetTimePeriodMs);

  final int n = DFSUtil.getRandom().nextInt(512) + 512;
  final List<Future<Integer>> futures = new ArrayList<Future<Integer>>(n);

  final ExecutorService pool = Executors.newFixedThreadPool(32);
  try {
    // increment
    for(int i = 0; i < n; i++) {
      futures.add(pool.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
          return (int)c.increment();
        }
      }));
    }

    // sort and wait for the futures
    Collections.sort(futures, CMP);
  } finally {
    pool.shutdown();
  }

  // check futures
  Assert.assertEquals(n, futures.size());
  for(int i = 0; i < n; i++) {
    Assert.assertEquals(i + 1, futures.get(i).get().intValue());
  }
  Assert.assertEquals(n, c.getCount());

  // test auto-reset
  Thread.sleep(countResetTimePeriodMs + 100);
  Assert.assertEquals(1, c.increment());
}
Project: hadoop    File: SnapshottableDirectoryStatus.java
@Override
public int compare(SnapshottableDirectoryStatus left,
                   SnapshottableDirectoryStatus right) {
  int d = DFSUtil.compareBytes(left.parentFullPath, right.parentFullPath);
  return d != 0? d
      : DFSUtil.compareBytes(left.dirStatus.getLocalNameInBytes(),
          right.dirStatus.getLocalNameInBytes());
}
Project: hadoop    File: TestTransferFsImage.java
/**
 * Similar to the above test, except that there are multiple local files
 * and one of them can be saved.
 */
@Test
public void testClientSideExceptionOnJustOneDir() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0).build();
  NNStorage mockStorage = Mockito.mock(NNStorage.class);
  List<File> localPaths = ImmutableList.of(
      new File("/xxxxx-does-not-exist/blah"),
      new File(TEST_DIR, "testfile")    
      );

  try {
    URL fsName = DFSUtil.getInfoServer(
        cluster.getNameNode().getServiceRpcAddress(), conf,
        DFSUtil.getHttpClientScheme(conf)).toURL();

    String id = "getimage=1&txid=0";

    TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);      
    Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0));
    assertTrue("The valid local file should get saved properly",
        localPaths.get(1).length() > 0);
  } finally {
    cluster.shutdown();      
  }
}
Project: hadoop    File: CacheDirectiveInfo.java
@Override
public String toString() {
  if (isRelative) {
    return DFSUtil.durationToString(ms);
  }
  return DFSUtil.dateToIso8601String(new Date(ms));
}
Project: hadoop    File: TestFileTruncate.java
/**
 * Truncate files and then run other operations such as
 * rename, set replication, set permission, etc.
 */
@Test
public void testTruncateWithOtherOperations() throws IOException {
  Path dir = new Path("/testTruncateOtherOperations");
  fs.mkdirs(dir);
  final Path p = new Path(dir, "file");
  final byte[] data = new byte[2 * BLOCK_SIZE];

  DFSUtil.getRandom().nextBytes(data);
  writeContents(data, data.length, p);

  final int newLength = data.length - 1;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  fs.setReplication(p, (short)(REPLICATION - 1));
  fs.setPermission(p, FsPermission.createImmutable((short)0444));

  final Path q = new Path(dir, "newFile");
  fs.rename(p, q);

  checkBlockRecovery(q);
  checkFullFile(q, newLength, data);

  cluster.restartNameNode();
  checkFullFile(q, newLength, data);

  fs.delete(dir, true);
}
Project: hadoop    File: NamenodeWebHdfsMethods.java
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case APPEND:
  {
    final URI uri = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT:
  {
    np.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE:
  {
    // We treat each rest request as a separate client.
    final boolean b = np.truncate(fullpath, newLength.getValue(), 
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
Project: hadoop    File: FSPermissionChecker.java
private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx,
    INode inode, int snapshotId) {
  INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
  if (getAttributesProvider() != null) {
    String[] elements = new String[pathIdx + 1];
    for (int i = 0; i < elements.length; i++) {
      elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
    }
    inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
  }
  return inodeAttrs;
}
Project: hadoop    File: NameNodeHttpServer.java
private Map<String, String> getAuthFilterParams(Configuration conf)
    throws IOException {
  Map<String, String> params = new HashMap<String, String>();
  String principalInConf = conf
      .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params
        .put(
            DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
            SecurityUtil.getServerPrincipal(principalInConf,
                                            bindAddress.getHostName()));
  } else if (UserGroupInformation.isSecurityEnabled()) {
    HttpServer2.LOG.error(
        "WebHDFS and security are enabled, but configuration property '" +
        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
        "' is not set.");
  }
  String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
      DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put(
        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
        httpKeytab);
  } else if (UserGroupInformation.isSecurityEnabled()) {
    HttpServer2.LOG.error(
        "WebHDFS and security are enabled, but configuration property '" +
        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
        "' is not set.");
  }
  String anonymousAllowed = conf
    .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED);
  if (anonymousAllowed != null && !anonymousAllowed.isEmpty()) {
    params.put(
        DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED,
        anonymousAllowed);
  }
  return params;
}
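
Note: DFSUtil.getSpnegoKeytabKey returns the dedicated WebHDFS keytab key when it is set and otherwise falls back to the key passed in (here the NameNode keytab key). A minimal secure-mode configuration sketch; the principal and keytab path are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class WebHdfsSpnegoConfExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
        "HTTP/_HOST@EXAMPLE.COM");                        // placeholder principal
    conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
        "/etc/security/keytabs/spnego.service.keytab");   // placeholder path
    System.out.println(
        conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
  }
}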
Project: hadoop    File: TestShortCircuitLocalRead.java
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Project: hadoop    File: AdminHelper.java
/**
 * Parses a time-to-live value from a string
 * @return The ttl in milliseconds
 * @throws IOException if it could not be parsed
 */
static Long parseTtlString(String maxTtlString) throws IOException {
  Long maxTtl = null;
  if (maxTtlString != null) {
    if (maxTtlString.equalsIgnoreCase("never")) {
      maxTtl = CachePoolInfo.RELATIVE_EXPIRY_NEVER;
    } else {
      maxTtl = DFSUtil.parseRelativeTime(maxTtlString);
    }
  }
  return maxTtl;
}
Project: hadoop    File: BootstrapStandby.java
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }

  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
Project: hadoop    File: DFSck.java
/**
 * Derive the namenode http address from the current file system,
 * either default or as set by "-fs" in the generic options.
 * @return Returns http address or null if failure.
 * @throws IOException if we can't determine the active NN address
 */
private URI getCurrentNamenodeAddress(Path target) throws IOException {
  //String nnAddress = null;
  Configuration conf = getConf();

  //get the filesystem object to verify it is an HDFS system
  final FileSystem fs = target.getFileSystem(conf);
  if (!(fs instanceof DistributedFileSystem)) {
    System.err.println("FileSystem is " + fs.getUri());
    return null;
  }

  return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
      DFSUtil.getHttpClientScheme(conf));
}
Project: hadoop    File: TestBlockToken.java
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);

  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  final UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}