Example source code for the Java class org.apache.hadoop.hdfs.protocol.HdfsConstants

Project: hadoop    File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertFalse(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify getContentSummary without any quota set
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
Project: hadoop    File: TestHDFSFileContextMainOperations.java
/**
 * Perform operations such as setting quotas, deleting files, and renaming,
 * and ensure that the system can apply the edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);

  // Set quota so that the parent of dst1 cannot accept any new files/directories under it
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);

  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
Project: circus-train    File: HdfsSnapshotLocationManager.java
private Path createSnapshot() throws IOException {
  LOG.debug("Source table {}.{} has its data located at {}", sourceTable.getDbName(), sourceTable.getTableName(),
      sourceDataPath);

  FileSystem fileSystem = fileSystemFactory.get(sourceDataPath, sourceHiveConf);
  Path snapshotMetaDataPath = new Path(sourceDataPath, HdfsConstants.DOT_SNAPSHOT_DIR);
  Path resolvedLocation = sourceDataPath;
  if (fileSystem.exists(snapshotMetaDataPath)) {
    if (snapshotsDisabled) {
      LOG.info("Path {} can be snapshot, but feature has been disabled.", sourceDataPath);
    } else {
      LOG.debug("Creating source data snapshot: {}, {}", sourceDataPath, eventId);
      // fileSystem.createSnapshot does not return a fully qualified URI.
      resolvedLocation = fileSystem.makeQualified(fileSystem.createSnapshot(sourceDataPath, eventId));
      snapshotPath = resolvedLocation;
    }
  } else {
    LOG.debug("Snapshots not enabled on source location: {}", sourceDataPath);
  }
  return resolvedLocation;
}
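
For reference, the public FileSystem snapshot API used above can also be exercised directly. Below is a minimal sketch with a placeholder path, assuming fs.defaultFS points at an HDFS cluster and the directory has already been made snapshottable (e.g. with "hdfs dfsadmin -allowSnapshot <path>").

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster.
    FileSystem fs = FileSystem.get(new Configuration());
    Path table = new Path("/user/hive/warehouse/example_table"); // placeholder path

    // createSnapshot returns the snapshot root, e.g. <path>/.snapshot/snap-1;
    // makeQualified adds scheme and authority, as HdfsSnapshotLocationManager does.
    Path snapshot = fs.makeQualified(fs.createSnapshot(table, "snap-1"));
    System.out.println(snapshot);
  }
}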
Project: hadoop    File: TestDatanodeRegister.java
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();

  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();

  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();

  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();

  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
Project: hadoop    File: DFSOutputStream.java
/**
 * Create a socket for a write pipeline
 * @param first the first datanode 
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
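
As a side note, the per-connection settings applied above (send buffer size, connect timeout, SO_TIMEOUT) can be reproduced with plain java.net calls. The sketch below is an illustration only: the address and timeout are placeholders, and the 128 KB buffer is an assumed stand-in for HdfsConstants.DEFAULT_DATA_SOCKET_SIZE.

import java.net.InetSocketAddress;
import java.net.Socket;

public class PipelineSocketSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder datanode transfer address; DFSOutputStream obtains the real
    // one from DatanodeInfo.getXferAddr().
    InetSocketAddress dnAddr =
        InetSocketAddress.createUnresolved("datanode.example.com", 50010);

    Socket sock = new Socket();
    sock.setSendBufferSize(128 * 1024); // assumed DEFAULT_DATA_SOCKET_SIZE (128 KB)
    sock.setSoTimeout(60_000);          // read timeout in ms, placeholder value
    System.out.println("would connect to " + dnAddr
        + " with send buffer " + sock.getSendBufferSize());
    sock.close();
  }
}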
Project: hadoop    File: TestMiniDFSCluster.java
@Test(timeout=100000)
public void testIsClusterUpAfterShutdown() throws Throwable {
  Configuration conf = new HdfsConfiguration();
  File testDataCluster4 = new File(testDataPath, CLUSTER_4);
  String c4Path = testDataCluster4.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
  MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
  try {
    DistributedFileSystem dfs = cluster4.getFileSystem();
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    cluster4.shutdown();
  } finally {
    while(cluster4.isClusterUp()){
      Thread.sleep(1000);
    }  
  }
}
Project: hadoop    File: DFSClient.java
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
Project: hadoop    File: FSDirStatAndListingOp.java
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
  throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }

  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
Project: hadoop    File: TestEditLogFileInputStream.java
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
Project: hadoop    File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
Project: hadoop    File: TestSymlinkHdfs.java
@Test(timeout=10000)
/** Test createSymlink(..) with quota. */
public void testQuota() throws IOException {
  final Path dir = new Path(testBaseDir1());
  dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);

  final Path file = new Path(dir, "file");
  createAndWriteFile(file);

  //creating the first link should succeed
  final Path link1 = new Path(dir, "link1");
  wrapper.createSymlink(file, link1, false);

  try {
    //creating the second link should fail with QuotaExceededException.
    final Path link2 = new Path(dir, "link2");
    wrapper.createSymlink(file, link2, false);
    fail("Created symlink despite quota violation");
  } catch(QuotaExceededException qee) {
    //expected
  }
}
Project: hadoop    File: NameNode.java
/**
 * @return address of file system
 */
public static InetSocketAddress getAddress(URI filesystemURI) {
  String authority = filesystemURI.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s has no authority.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
  }
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
      filesystemURI.getScheme())) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
        HdfsConstants.HDFS_URI_SCHEME));
  }
  return getAddress(authority);
}
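
A quick illustration of the validation above, using hypothetical URIs (Hadoop classes on the classpath are required; getAddress only parses the URI, it does not connect):

import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class NameNodeAddressSketch {
  public static void main(String[] args) {
    // Hypothetical NameNode URI with scheme and authority present.
    InetSocketAddress ok = NameNode.getAddress(URI.create("hdfs://nn.example.com:8020"));
    System.out.println(ok.getHostName() + ":" + ok.getPort()); // nn.example.com:8020

    // Both of these would throw IllegalArgumentException:
    //   NameNode.getAddress(URI.create("file:///tmp"))  -> wrong scheme
    //   NameNode.getAddress(URI.create("hdfs:///path")) -> no authority
  }
}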
Project: hadoop    File: EditLogFileInputStream.java
static FSEditLogLoader.EditLogValidation validateEditLog(File file)
    throws IOException {
  EditLogFileInputStream in;
  try {
    in = new EditLogFileInputStream(file);
    in.getVersion(true); // causes us to read the header
  } catch (LogHeaderCorruptException e) {
    // If the header is malformed or has the wrong value, the log is corrupt
    LOG.warn("Log file " + file + " has no valid header", e);
    return new FSEditLogLoader.EditLogValidation(0,
        HdfsConstants.INVALID_TXID, true);
  }

  try {
    return FSEditLogLoader.validateEditLog(in);
  } finally {
    IOUtils.closeStream(in);
  }
}
Project: hadoop    File: EditLogFileInputStream.java
/**
 * Read the header of the fsedit log.
 * @param in the fsedit stream
 * @return the edit log version number
 * @throws IOException if an error occurs
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
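
For context, the layout version read above is a single int at the start of the edit log header, and HDFS layout versions are negative integers that decrease as the layout evolves, which is why a value smaller than NAMENODE_LAYOUT_VERSION indicates a future layout. A self-contained sketch of writing and re-reading such a header in memory, using a placeholder version value:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class EditLogHeaderSketch {
  public static void main(String[] args) throws IOException {
    int layoutVersion = -60; // placeholder value, not tied to any specific release

    // Write a fake header consisting only of the version int.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    new DataOutputStream(bytes).writeInt(layoutVersion);

    // Read it back the same way readLogVersion() does.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println("log version = " + in.readInt()); // log version = -60
  }
}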
Project: hadoop    File: TestDistCpSync.java
/**
 * Test that the sync returns false in the following scenarios:
 * 1. the source/target dir is not a snapshottable dir
 * 2. the source/target does not have the given snapshots
 * 3. changes have been made in the target
 */
@Test
public void testFallback() throws Exception {
  // the source/target dir are not snapshottable dir
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  final Path spath = new Path(source,
      HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  // the source/target does not have the given snapshots
  dfs.allowSnapshot(source);
  dfs.allowSnapshot(target);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.createSnapshot(source, "s1");
  dfs.createSnapshot(source, "s2");
  dfs.createSnapshot(target, "s1");
  Assert.assertTrue(DistCpSync.sync(options, conf));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  // changes have been made in target
  final Path subTarget = new Path(target, "sub");
  dfs.mkdirs(subTarget);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.delete(subTarget, true);
  Assert.assertTrue(DistCpSync.sync(options, conf));
}
Project: hadoop    File: Snapshot.java
public static String getSnapshotPath(String snapshottableDir,
    String snapshotRelativePath) {
  final StringBuilder b = new StringBuilder(snapshottableDir);
  if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
    b.append(Path.SEPARATOR);
  }
  return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
      .append(Path.SEPARATOR)
      .append(snapshotRelativePath)
      .toString();
}
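
A dependency-free sketch of what this produces, with ".snapshot" and '/' inlined in place of HdfsConstants.DOT_SNAPSHOT_DIR and Path.SEPARATOR:

public class SnapshotPathSketch {
  // Mirrors the logic of Snapshot.getSnapshotPath() for illustration only.
  static String snapshotPath(String snapshottableDir, String snapshotRelativePath) {
    StringBuilder b = new StringBuilder(snapshottableDir);
    if (b.charAt(b.length() - 1) != '/') {
      b.append('/');
    }
    return b.append(".snapshot").append('/').append(snapshotRelativePath).toString();
  }

  public static void main(String[] args) {
    System.out.println(snapshotPath("/user/data", "s1"));  // /user/data/.snapshot/s1
    System.out.println(snapshotPath("/user/data/", "s1")); // /user/data/.snapshot/s1
  }
}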
Project: hadoop    File: TestTruncateQuotaUpdate.java
@Before
public void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();

  fsdir = cluster.getNamesystem().getFSDirectory();
  dfs = cluster.getFileSystem();

  dfs.mkdirs(dir);
  dfs.setQuota(dir, Long.MAX_VALUE - 1, DISKQUOTA);
  dfs.setQuotaByStorageType(dir, StorageType.DISK, DISKQUOTA);
  dfs.setStoragePolicy(dir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
}
Project: hadoop    File: FSImageFormat.java
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support snapshots, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
          RESERVED_ERROR_MSG);
      component =
          DFSUtil.string2Bytes(renameReservedMap
              .get(HdfsConstants.DOT_SNAPSHOT_DIR));
    }
  }
  return component;
}
Project: hadoop    File: TestHDFSFileContextMainOperations.java
/**
 * Perform operations such as setting quotas, deleting files, and renaming,
 * and ensure that the system can apply the edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);

  // Set quota so that the parent of dst1 cannot accept any new files/directories under it
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);

  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
Project: hadoop    File: SnapshotDiff.java
private static String getSnapshotName(String name) {
  if (Path.CUR_DIR.equals(name)) { // current directory
    return "";
  }
  final int i;
  if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
    i = 0;
  } else if (name.startsWith(
      HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
    i = 1;
  } else {
    return name;
  }

  // get the snapshot name
  return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
}
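
The reverse direction as a dependency-free sketch, assuming DOT_SNAPSHOT_DIR is ".snapshot" and SEPARATOR_DOT_SNAPSHOT_DIR is "/.snapshot":

public class SnapshotNameSketch {
  // Mirrors SnapshotDiff.getSnapshotName() with the constants inlined.
  static String snapshotName(String name) {
    if (".".equals(name)) { // current directory
      return "";
    }
    final int i;
    if (name.startsWith(".snapshot/")) {
      i = 0;
    } else if (name.startsWith("/.snapshot/")) {
      i = 1;
    } else {
      return name;
    }
    return name.substring(i + ".snapshot".length() + 1);
  }

  public static void main(String[] args) {
    System.out.println(snapshotName(".snapshot/s1"));  // s1
    System.out.println(snapshotName("/.snapshot/s1")); // s1
    System.out.println(snapshotName("s1"));            // s1
  }
}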
Project: hadoop    File: TestLazyPersistFiles.java
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned back.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
Project: hadoop-oss    File: NuCypherExtUtilClient.java
public static URI getNNUri(InetSocketAddress namenode) {
  int port = namenode.getPort();
  String portString =
      (port == HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) ?
          "" : (":" + port);
  return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
      + namenode.getHostName() + portString);
}
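
A dependency-free sketch of the resulting URIs, assuming 8020 as the default NameNode RPC port and using hypothetical host names:

import java.net.InetSocketAddress;
import java.net.URI;

public class NNUriSketch {
  // Same construction as getNNUri(), with the default RPC port inlined.
  static URI nnUri(InetSocketAddress namenode) {
    int port = namenode.getPort();
    String portString = (port == 8020) ? "" : (":" + port);
    return URI.create("hdfs://" + namenode.getHostName() + portString);
  }

  public static void main(String[] args) {
    System.out.println(nnUri(InetSocketAddress.createUnresolved("nn1.example.com", 8020))); // hdfs://nn1.example.com
    System.out.println(nnUri(InetSocketAddress.createUnresolved("nn2.example.com", 9000))); // hdfs://nn2.example.com:9000
  }
}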
Project: hadoop    File: TestHistoryFileManager.java
@Test
public void testCreateDirsWithFileSystem() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(
      HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
  testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), true);
}
Project: hadoop    File: TestSymlinkHdfs.java
@Test(timeout=10000)
/** Test creating a symlink whose path has the maximum length. */
public void testCreateLinkMaxPathLink() throws IOException {
  Path dir  = new Path(testBaseDir1());
  Path file = new Path(testBaseDir1(), "file");
  final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH;
  final int dirLen     = dir.toString().length() + 1;
  int   len            = maxPathLen - dirLen;

  // Build a MAX_PATH_LENGTH path
  StringBuilder sb = new StringBuilder("");
  for (int i = 0; i < (len / 10); i++) {
    sb.append("0123456789");
  }
  for (int i = 0; i < (len % 10); i++) {
    sb.append("x");
  }
  Path link = new Path(sb.toString());
  assertEquals(maxPathLen, dirLen + link.toString().length()); 

  // Check that it works
  createAndWriteFile(file);
  wrapper.setWorkingDirectory(dir);
  wrapper.createSymlink(file, link, false);
  readFile(link);

  // Now modify the path so it's too large
  link = new Path(sb.toString()+"x");
  try {
    wrapper.createSymlink(file, link, false);
    fail("Path name should be too long");
  } catch (IOException x) {
    // Expected
  }
}
Project: hadoop    File: Hdfs.java
/**
 * This constructor has the signature needed by
 * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}
 * 
 * @param theUri which must be that of Hdfs
 * @param conf configuration
 * @throws IOException
 */
Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
  super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

  if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
    throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
  }
  String host = theUri.getHost();
  if (host == null) {
    throw new IOException("Incomplete HDFS URI, no host: " + theUri);
  }

  this.dfs = new DFSClient(theUri, conf, getStatistics());
}
Project: hadoop    File: DFSUtil.java
/**
 * Returns whether the component is reserved.
 * 
 * <p>
 * Note that some components are only reserved under certain directories, e.g.
 * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
 * @return true, if the component is reserved
 */
public static boolean isReservedPathComponent(String component) {
  for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
    if (component.equals(reserved)) {
      return true;
    }
  }
  return false;
}
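
A hedged usage example (Hadoop on the classpath is required; the expected results assume that ".snapshot" is among HdfsConstants.RESERVED_PATH_COMPONENTS while ordinary names are not):

import org.apache.hadoop.hdfs.DFSUtil;

public class ReservedComponentSketch {
  public static void main(String[] args) {
    System.out.println(DFSUtil.isReservedPathComponent(".snapshot")); // expected: true
    System.out.println(DFSUtil.isReservedPathComponent("data"));      // expected: false
  }
}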
Project: hadoop    File: NameNodeProxies.java
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending on
 * whether the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 **/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
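
A hedged usage sketch of this method: obtaining a ClientProtocol proxy for the default file system URI. The configuration is a placeholder and must point at an HDFS cluster; passing null for fallbackToSimpleAuth is assumed to be acceptable when no secure-to-simple fallback tracking is needed.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class CreateProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at HDFS
    URI nnUri = FileSystem.getDefaultUri(conf);

    ProxyAndInfo<ClientProtocol> proxyAndInfo =
        NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class, null);
    ClientProtocol namenode = proxyAndInfo.getProxy();
    System.out.println("proxy created: " + (namenode != null));
  }
}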
Project: hadoop    File: TestReplicationPolicy.java
/**
 * In this testcase, the client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3, the 2nd replica should be placed on a different rack,
 * and the 3rd replica should be placed on the same rack as the 1st replica.
 * @throws Exception
 */
@Test
public void testChoooseTarget4() throws Exception {
  // make data nodes 0 and 1 unqualified to be chosen: not enough disk space
  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(targets.length, 0);

  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  targets = chooseTarget(2);
  assertEquals(targets.length, 2);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(targets.length, 3);
  for(int i=0; i<3; i++) {
    assertFalse(isOnSameRack(targets[i], dataNodes[0]));
  }
  assertTrue(isOnSameRack(targets[0], targets[1]) ||
          isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));

  for(int i=0; i<2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
Project: hadoop    File: TestReplicationPolicyWithNodeGroup.java
private static void setupDataNodeCapacity() {
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
Project: hadoop    File: DFSClient.java
/**
 * Sets or resets quotas by storage type for a directory.
 * @see ClientProtocol#setQuota(String, long, long, StorageType)
 */
void setQuotaByStorageType(String src, StorageType type, long quota)
    throws IOException {
  if (quota <= 0 && quota != HdfsConstants.QUOTA_DONT_SET &&
      quota != HdfsConstants.QUOTA_RESET) {
    throw new IllegalArgumentException("Invalid values for quota :" +
      quota);
  }
  if (type == null) {
    throw new IllegalArgumentException("Invalid storage type(null)");
  }
  if (!type.supportTypeQuota()) {
    throw new IllegalArgumentException("Don't support Quota for storage type : "
      + type.toString());
  }
  TraceScope scope = getPathTraceScope("setQuotaByStorageType", src);
  try {
    namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
      FileNotFoundException.class,
      QuotaByStorageTypeExceededException.class,
      UnresolvedPathException.class,
      SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
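
From application code the same operation is usually reached through the public DistributedFileSystem API rather than DFSClient directly, as the TestQuotaByStorageType snippets above do. A minimal sketch with placeholder paths and sizes, assuming fs.defaultFS points at an HDFS cluster that supports storage type quotas:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class StorageTypeQuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at HDFS
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    Path dir = new Path("/quota-demo"); // placeholder directory
    dfs.mkdirs(dir);
    // Allow at most 1 GB of SSD storage to be consumed under /quota-demo.
    dfs.setQuotaByStorageType(dir, StorageType.SSD, 1024L * 1024 * 1024);
  }
}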
Project: hadoop    File: FSEditLogLoader.java
/**
 * Throw an appropriate exception during an upgrade from 0.20.203, when edit log
 * loading could fail due to opcode conflicts.
 */
private void check203UpgradeFailure(int logVersion, Throwable e)
    throws IOException {
  // The 0.20.203 release has opcodes that conflict with later releases.
  // The editlog must be emptied by restarting the namenode before proceeding
  // with the upgrade.
  if (Storage.is203LayoutVersion(logVersion)
      && logVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) {
    String msg = "During upgrade failed to load the editlog version "
        + logVersion + " from release 0.20.203. Please go back to the old "
        + " release and restart the namenode. This empties the editlog "
        + " and saves the namespace. Resume the upgrade after this step.";
    throw new IOException(msg, e);
  }
}
Project: hadoop    File: FSEditLogLoader.java
/**
 * Find the last valid transaction ID in the stream.
 * If there are invalid or corrupt transactions in the middle of the stream,
 * validateEditLog will skip over them.
 * This reads through the stream but does not close it.
 */
static EditLogValidation validateEditLog(EditLogInputStream in) {
  long lastPos = 0;
  long lastTxId = HdfsConstants.INVALID_TXID;
  long numValid = 0;
  FSEditLogOp op = null;
  while (true) {
    lastPos = in.getPosition();
    try {
      if ((op = in.readOp()) == null) {
        break;
      }
    } catch (Throwable t) {
      FSImage.LOG.warn("Caught exception after reading " + numValid +
          " ops from " + in + " while determining its valid length." +
          "Position was " + lastPos, t);
      in.resync();
      FSImage.LOG.warn("After resync, position is " + in.getPosition());
      continue;
    }
    if (lastTxId == HdfsConstants.INVALID_TXID
        || op.getTransactionId() > lastTxId) {
      lastTxId = op.getTransactionId();
    }
    numValid++;
  }
  return new EditLogValidation(lastPos, lastTxId, false);
}
Project: hadoop    File: FSEditLogLoader.java
static EditLogValidation scanEditLog(EditLogInputStream in) {
  long lastPos = 0;
  long lastTxId = HdfsConstants.INVALID_TXID;
  long numValid = 0;
  FSEditLogOp op = null;
  while (true) {
    lastPos = in.getPosition();
    try {
      if ((op = in.readOp()) == null) { // TODO
        break;
      }
    } catch (Throwable t) {
      FSImage.LOG.warn("Caught exception after reading " + numValid +
          " ops from " + in + " while determining its valid length." +
          "Position was " + lastPos, t);
      in.resync();
      FSImage.LOG.warn("After resync, position is " + in.getPosition());
      continue;
    }
    if (lastTxId == HdfsConstants.INVALID_TXID
        || op.getTransactionId() > lastTxId) {
      lastTxId = op.getTransactionId();
    }
    numValid++;
  }
  return new EditLogValidation(lastPos, lastTxId, false);
}
Project: hadoop    File: FSDirAclOp.java
static AclStatus getAclStatus(
    FSDirectory fsd, String src) throws IOException {
  checkAclsConfigFlag(fsd);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  String srcs = FSDirectory.normalizePath(src);
  fsd.readLock();
  try {
    // There is no real inode for the path ending in ".snapshot", so return a
    // non-null, unpopulated AclStatus.  This is similar to getFileInfo.
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR) &&
        fsd.getINode4DotSnapshot(srcs) != null) {
      return new AclStatus.Builder().owner("").group("").build();
    }
    INodesInPath iip = fsd.getINodesInPath(srcs, true);
    if (fsd.isPermissionEnabled()) {
      fsd.checkTraverse(pc, iip);
    }
    INode inode = FSDirectory.resolveLastINode(iip);
    int snapshotId = iip.getPathSnapshotId();
    List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
            inode.getLocalNameBytes(), inode, snapshotId));
    FsPermission fsPermission = inode.getFsPermission(snapshotId);
    return new AclStatus.Builder()
        .owner(inode.getUserName()).group(inode.getGroupName())
        .stickyBit(fsPermission.getStickyBit())
        .setPermission(fsPermission)
        .addEntries(acl).build();
  } finally {
    fsd.readUnlock();
  }
}
Project: hadoop    File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateOneSSD() throws Exception {
  testQuotaByStorageTypeWithFileCreateCase(
      HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
      StorageType.SSD,
      (short)1);
}
Project: hadoop    File: NNStorage.java
/**
 * Format all available storage directories.
 */
public void format(NamespaceInfo nsInfo) throws IOException {
  Preconditions.checkArgument(nsInfo.getLayoutVersion() == 0 ||
      nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION,
      "Bad layout version: %s", nsInfo.getLayoutVersion());

  this.setStorageInfo(nsInfo);
  this.blockpoolID = nsInfo.getBlockPoolID();
  for (Iterator<StorageDirectory> it =
                         dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    format(sd);
  }
}
Project: hadoop    File: TestQuotaByStorageType.java
/**
 * Both traditional space quota and the storage type quota for SSD are set and
 * not exceeded.
 */
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception {
  final Path foo = new Path(dir, "foo");
  dfs.mkdirs(foo);

  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, REPLICATION * BLOCKSIZE * 10);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  Path createdFile = new Path(foo, "created_file.data");
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);

  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());

  dfs.delete(createdFile, true);

  QuotaCounts cntAfterDelete = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(1, cntAfterDelete.getNameSpace());
  assertEquals(0, cntAfterDelete.getStorageSpace());

  // Validate the computeQuotaUsage()
  QuotaCounts counts = new QuotaCounts.Builder().build();
  fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
  assertEquals(fnode.dumpTreeRecursively().toString(), 1,
      counts.getNameSpace());
  assertEquals(fnode.dumpTreeRecursively().toString(), 0,
      counts.getStorageSpace());
}
Project: hadoop    File: TestReplicationPolicyWithNodeGroup.java
/**
 * Test the replica placement policy in the case of a boundary topology.
 * Rack 2 has only one node group and cannot hold two replicas.
 * The 1st replica will be placed on the writer.
 * The 2nd replica should be placed on a different rack.
 * The 3rd replica should be placed on the same rack as the writer, but in a
 * different node group.
 */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }

  for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
    updateHeartbeatWithUsage(dataNodes[0],
              2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
              (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
              0L, 0L, 0L, 0, 0);

    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 0);

  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 1);

  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 2);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(targets.length, 3);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
Project: hadoop    File: TestReplicationPolicyWithNodeGroup.java
/**
 * In this testcase, the client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica but in a
 * different node group, and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // make data node 0 unqualified to be chosen
  updateHeartbeatWithUsage(dataNodes[0],
      2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
      0L, 0L, 0, 0); // no space

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(targets.length, 0);

  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertEquals(storages[1], targets[0]);

  targets = chooseTarget(2);
  assertEquals(targets.length, 2);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(targets.length, 3);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(4);
  assertEquals(targets.length, 4);
  assertEquals(storages[1], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[1], targets[2]) ||
             isOnSameRack(targets[2], targets[3]));

  updateHeartbeatWithUsage(dataNodes[0],
      2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
Project: hadoop    File: TestReplicationPolicy.java
/**
 * In this testcase, the client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // make data node 0 unqualified to be chosen
  updateHeartbeatWithUsage(dataNodes[0],
      2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
      0L, 0L, 0, 0); // no space

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(targets.length, 0);

  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertEquals(storages[1], targets[0]);

  targets = chooseTarget(2);
  assertEquals(targets.length, 2);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3);
  assertEquals(targets.length, 3);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(4);
  assertEquals(targets.length, 4);
  assertEquals(storages[1], targets[0]);
  for(int i=1; i<4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2]) ||
             isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));

  resetHeartbeatForStorages();
}