Example source code for the Java class org.apache.hadoop.hbase.fs.HFileSystem
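
HFileSystem is HBase's wrapper around a Hadoop FileSystem. It exposes the raw filesystem underneath (getBackingFs), a view that skips filesystem-level checksum verification for reads that HBase verifies with its own per-block checksums (getNoChecksumFs), and a flag telling readers which scheme to use (useHBaseChecksum). A minimal orientation sketch, using only methods exercised in the snippets below; the configuration is assumed to come from an hbase-site.xml on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class HFileSystemTour {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = HFileSystem.get(conf);   // wraps the default filesystem

    if (fs instanceof HFileSystem) {
      HFileSystem hfs = (HFileSystem) fs;
      FileSystem backing = hfs.getBackingFs();       // the raw filesystem underneath
      FileSystem noChecksum = hfs.getNoChecksumFs(); // reads without FS-level checksums
      boolean hbaseChecksum = hfs.useHBaseChecksum();
      System.out.println("backing=" + backing.getUri()
          + ", hbase-level checksums=" + hbaseChecksum);
    }
  }
}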

Project: ditb    File: MasterFileSystem.java
public MasterFileSystem(Server master, MasterServices services)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
Project: ditb    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family. If the source file is on the
 * same file-system, it is moved from the source location to the destination location;
 * otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath    {@link Path} to the file to import
 * @param seqNum     Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Project: ditb    File: HFile.java
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction.  See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis an open stream over the file at path
 * @param size size of the file (the trailer is read from its tail)
 * @param cacheConf Cache configuration values, cannot be null.
 * @param hfs filesystem wrapper (may be null)
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", justification = "Intentional")
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis, long size,
    CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
  FixedFileTrailer trailer = null;
  try {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    switch (trailer.getMajorVersion()) {
    case 2:
      return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
    case 3:
      return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
    default:
      throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
    }
  } catch (Throwable t) {
    try {
      fsdis.close();
    } catch (Throwable t2) {
      LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
    }
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
  }
}
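The dispatch above hinges entirely on the fixed trailer at the tail of the file. For orientation, a minimal sketch that reads just that trailer to discover a file's format version, reusing only calls visible in the snippet (the path is hypothetical):

Configuration conf = HBaseConfiguration.create();
FileSystem fs = HFileSystem.get(conf);
Path path = new Path("/hbase/data/default/t1/region/f/myhfile");  // hypothetical
long fileSize = fs.getFileStatus(path).getLen();

// Open with FS checksums first, as pickReaderVersion's assert requires.
FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path);
FixedFileTrailer trailer =
    FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
System.out.println("HFile major version: " + trailer.getMajorVersion());
fsdis.close();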
Project: ditb    File: TestChecksum.java
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i)
    dos.writeInt(i);
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();

  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());

  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
      is, totalSize, (HFileSystem) fs, path, meta);
  HFileBlock b = hbr.readBlockData(0, -1, -1, false);
  assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
Project: ditb    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
      cowType.shouldBeCached(BlockType.LEAF_INDEX),
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
          false, false, false);
}
Project: ditb    File: IntegrationTestBigLinkedList.java
@Override public int run(String[] args) throws Exception {
  if (args.length < 1) {
    System.err.println("Usage: Clean <output dir>");
    return -1;
  }

  Path p = new Path(args[0]);
  Configuration conf = getConf();
  TableName tableName = getTableName(conf);
  try (FileSystem fs = HFileSystem.get(conf);
      Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }

    if (fs.exists(p)) {
      fs.delete(p, true);
    }
  }

  return 0;
}
Project: LCIndex-HBase-0.94.16    File: StoreFile.java
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size, CacheConfig cacheConf,
    DataBlockEncoding preferredEncodingInCache, boolean closeIStream) throws IOException {
  super(path);
  pWinterPath = path;
  FSDataInputStream in = hfileLink.open(fs);
  FSDataInputStream inNoChecksum = in;
  if (fs instanceof HFileSystem) {
    FileSystem noChecksumFs = ((HFileSystem) fs).getNoChecksumFs();
    inNoChecksum = hfileLink.open(noChecksumFs);
  }

  reader =
      HFile.createReaderWithEncoding(fs, path, in, inNoChecksum, size, cacheConf,
        preferredEncodingInCache, closeIStream);
  bloomFilterType = BloomType.NONE;
}
Project: LCIndex-HBase-0.94.16    File: AbstractHFileReader.java
protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
    final long fileSize,
    final boolean closeIStream,
    final CacheConfig cacheConf, final HFileSystem hfs) {
  super(null, path);
  this.trailer = trailer;
  this.compressAlgo = trailer.getCompressionCodec();
  this.cacheConf = cacheConf;
  this.fileSize = fileSize;
  this.istream = fsdis;
  this.closeIStream = closeIStream;
  this.path = path;
  this.name = path.getName();
  this.hfs = hfs;
  this.istreamNoFsChecksum = fsdisNoFsChecksum;
}
Project: LCIndex-HBase-0.94.16    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, FSDataInputStream istreamNoFsChecksum,
    Algorithm compressAlgo, long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
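Restated, the constructor's checksum decision reduces to one rule: HBase-level checksums are on only if the wrapper permits them (defaulting to on when no HFileSystem was supplied) and the file's minor version actually stores them. As a standalone helper (a distillation of the code above, not an HBase API):

static boolean resolveHBaseChecksum(HFileSystem hfs, int minorVersion) {
  // Default to verifying HBase checksums when nothing is configured.
  boolean use = (hfs != null) ? hfs.useHBaseChecksum() : true;
  if (minorVersion < MINOR_VERSION_WITH_CHECKSUM) {
    use = false;  // older files carry no stored checksums; fall back to FS checksums
  }
  return use;
}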
Project: LCIndex-HBase-0.94.16    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Project: pbase    File: MasterFileSystem.java
public MasterFileSystem(Server master, MasterServices services)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
Project: pbase    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file-system, it is moved from the
 * source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Project: pbase    File: HFile.java
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction.  See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis an open stream over the file at path
 * @param size size of the file (the trailer is read from its tail)
 * @param cacheConf Cache configuration values, cannot be null.
 * @param hfs filesystem wrapper (may be null)
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
    long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
  FixedFileTrailer trailer = null;
  try {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    switch (trailer.getMajorVersion()) {
    case 2:
      return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
    case 3:
      return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
    default:
      throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
    }
  } catch (Throwable t) {
    try {
      fsdis.close();
    } catch (Throwable t2) {
      LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
    }
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
  }
}
Project: pbase    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
      cowType.shouldBeCached(BlockType.LEAF_INDEX),
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, true, false);
}
Project: HIndex    File: MasterFileSystem.java
public MasterFileSystem(Server master, MasterServices services, boolean masterRecovery)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
    master.getConfiguration(), master, services,
    master.getServerName(), masterRecovery);
}
Project: HIndex    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file-system, it is moved from the
 * source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Project: HIndex    File: HFile.java
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction.  See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis an open stream over the file at path
 * @param size size of the file (the trailer is read from its tail)
 * @param cacheConf Cache configuration values, cannot be null.
 * @param hfs filesystem wrapper (may be null)
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
    long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
  FixedFileTrailer trailer = null;
  try {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    switch (trailer.getMajorVersion()) {
    case 2:
      return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
    case 3:
      return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
    default:
      throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
    }
  } catch (Throwable t) {
    try {
      fsdis.close();
    } catch (Throwable t2) {
      LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
    }
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
  }
}
Project: HIndex    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Project: IRIndex    File: StoreFile.java
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
    CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
    boolean closeIStream) throws IOException {
  super(path);

  FSDataInputStream in = hfileLink.open(fs);
  FSDataInputStream inNoChecksum = in;
  if (fs instanceof HFileSystem) {
    FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
    inNoChecksum = hfileLink.open(noChecksumFs);
  }

  reader = HFile.createReaderWithEncoding(fs, path, in, inNoChecksum,
              size, cacheConf, preferredEncodingInCache, closeIStream);
  bloomFilterType = BloomType.NONE;
}
Project: IRIndex    File: AbstractHFileReader.java
protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
    final long fileSize,
    final boolean closeIStream,
    final CacheConfig cacheConf, final HFileSystem hfs) {
  super(null, path);
  this.trailer = trailer;
  this.compressAlgo = trailer.getCompressionCodec();
  this.cacheConf = cacheConf;
  this.fileSize = fileSize;
  this.istream = fsdis;
  this.closeIStream = closeIStream;
  this.path = path;
  this.name = path.getName();
  this.hfs = hfs;
  this.istreamNoFsChecksum = fsdisNoFsChecksum;
}
Project: IRIndex    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, 
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path) 
  throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, 
        minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have 
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Project: IRIndex    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Project: hbase    File: MasterFileSystem.java
public MasterFileSystem(Configuration conf) throws IOException {
  this.conf = conf;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  this.walRootDir = FSUtils.getWALRootDir(conf);
  this.walFs = FSUtils.getWALFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.walFs.getUri()));
  walFs.setConf(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700"));
  this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
  // setup the filesystem variable
  createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
}
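Unlike the older constructors above, this variant lets the WAL root live on a different filesystem than hbase.rootdir, via FSUtils.getWALRootDir and FSUtils.getWALFileSystem. A sketch of driving that split from configuration; the hbase.wal.dir key is an assumption inferred from those FSUtils calls (verify against your HBase version), and the URIs are hypothetical:

Configuration conf = HBaseConfiguration.create();
conf.set("hbase.rootdir", "hdfs://data-nn:8020/hbase");     // store files and meta
conf.set("hbase.wal.dir", "hdfs://wal-nn:8020/hbase-wal");  // WALs (assumed key)
MasterFileSystem mfs = new MasterFileSystem(conf);          // sets up both roots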
Project: hbase    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file-system, it is moved from the
 * source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  srcPath = srcFs.resolvePath(srcPath);
  FileSystem realSrcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, realSrcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true));
}
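What is new in this copy relative to the older ones: srcPath is resolved before the same-HDFS check, so a logical path (for example one behind a ViewFS mount) is compared by its concrete target filesystem. A sketch of what resolvePath contributes, with a hypothetical viewfs mount:

Path logical = new Path("viewfs://cluster/bulk/myhfile");  // hypothetical mount point
FileSystem viewFs = logical.getFileSystem(conf);
Path physical = viewFs.resolvePath(logical);               // e.g. hdfs://nn:8020/...
FileSystem realFs = physical.getFileSystem(conf);          // what isSameHdfs compares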
Project: hbase    File: HFileBlock.java
FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path,
    HFileContext fileContext) throws IOException {
  this.fileSize = fileSize;
  this.hfs = hfs;
  if (path != null) {
    this.pathName = path.toString();
  }
  this.fileContext = fileContext;
  this.hdrSize = headerSize(fileContext.isUseHBaseChecksum());

  this.streamWrapper = stream;
  // Older versions of HBase didn't support checksum.
  this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum());
  defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext);
  encodedBlockDecodingCtx = defaultDecodingCtx;
}
Project: hbase    File: TestChecksum.java
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i)
    dos.writeInt(i);
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();

  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());

  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
      is, totalSize, (HFileSystem) fs, path, meta);
  HFileBlock b = hbr.readBlockData(0, -1, false, false);
  assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
Project: hbase    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
      cowType.shouldBeCached(BlockType.LEAF_INDEX),
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
          false, false);
}
Project: hbase    File: IntegrationTestBigLinkedList.java
@Override public int run(String[] args) throws Exception {
  if (args.length < 1) {
    System.err.println("Usage: Clean <output dir>");
    return -1;
  }

  Path p = new Path(args[0]);
  Configuration conf = getConf();
  TableName tableName = getTableName(conf);
  try (FileSystem fs = HFileSystem.get(conf);
      Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }

    if (fs.exists(p)) {
      fs.delete(p, true);
    }
  }

  return 0;
}
Project: PyroDB    File: MasterFileSystem.java
public MasterFileSystem(Server master, MasterServices services)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
    master.getConfiguration(), master, services,
    master.getServerName());
}
Project: PyroDB    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file-system, it is moved from the
 * source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Project: PyroDB    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Project: c5    File: HRegionFileSystem.java
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file-system, it is moved from the
 * source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!srcFs.getUri().equals(desFs.getUri())) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
        "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }

  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Project: c5    File: HFile.java
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction.  See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis an open stream over the file at path
 * @param size size of the file (the trailer is read from its tail)
 * @param cacheConf Cache configuration values, cannot be null.
 * @param hfs filesystem wrapper (may be null)
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
    long size, CacheConfig cacheConf, HFileSystem hfs) throws IOException {
  FixedFileTrailer trailer = null;
  try {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    switch (trailer.getMajorVersion()) {
    case 2:
      return new HFileReaderV2(
        path, trailer, fsdis, size, cacheConf, hfs);
    default:
      throw new CorruptHFileException("Invalid HFile version " + trailer.getMajorVersion());
    }
  } catch (Throwable t) {
    try {
      fsdis.close();
    } catch (Throwable t2) {
      LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
    }
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
  }
}
Project: c5    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Project: HBase-Research    File: StoreFile.java
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
    CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
    boolean closeIStream) throws IOException {
  super(path);

  FSDataInputStream in = hfileLink.open(fs);
  FSDataInputStream inNoChecksum = in;
  if (fs instanceof HFileSystem) {
    FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
    inNoChecksum = hfileLink.open(noChecksumFs);
  }

  reader = HFile.createReaderWithEncoding(fs, path, in, inNoChecksum,
              size, cacheConf, preferredEncodingInCache, closeIStream);
  bloomFilterType = BloomType.NONE;
}
Project: HBase-Research    File: AbstractHFileReader.java
protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
    final long fileSize,
    final boolean closeIStream,
    final CacheConfig cacheConf, final HFileSystem hfs) {
  super(null, path);
  this.trailer = trailer;
  this.compressAlgo = trailer.getCompressionCodec();
  this.cacheConf = cacheConf;
  this.fileSize = fileSize;
  this.istream = fsdis;
  this.closeIStream = closeIStream;
  this.path = path;
  this.name = path.getName();
  this.hfs = hfs;
  this.istreamNoFsChecksum = fsdisNoFsChecksum;
}
Project: HBase-Research    File: HFile.java
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction.  See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis an open checksummed stream of path's file
 * @param fsdisNoFsChecksum an open unchecksummed stream of path's file
 * @param size size of the file (the trailer is read from its tail)
 * @param closeIStream whether to close the stream once the reader has been created
 * @param cacheConf Cache configuration values, cannot be null.
 * @param preferredEncodingInCache the data block encoding preferred for cached blocks
 * @param hfs filesystem wrapper (may be null)
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
private static Reader pickReaderVersion(Path path, FSDataInputStream fsdis,
    FSDataInputStream fsdisNoFsChecksum,
    long size, boolean closeIStream, CacheConfig cacheConf,
    DataBlockEncoding preferredEncodingInCache, HFileSystem hfs)
    throws IOException {
  FixedFileTrailer trailer = null;
  try {
    trailer = FixedFileTrailer.readFromStream(fsdis, size);
  } catch (IllegalArgumentException iae) {
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, iae);
  }
  switch (trailer.getMajorVersion()) {
  case 1:
    return new HFileReaderV1(path, trailer, fsdis, size, closeIStream,
        cacheConf);
  case 2:
    return new HFileReaderV2(path, trailer, fsdis, fsdisNoFsChecksum,
        size, closeIStream,
        cacheConf, preferredEncodingInCache, hfs);
  default:
    throw new CorruptHFileException("Invalid HFile version " + trailer.getMajorVersion());
  }
}
Project: HBase-Research    File: HFileBlock.java
public FSReaderV2(FSDataInputStream istream, 
    FSDataInputStream istreamNoFsChecksum, Algorithm compressAlgo,
    long fileSize, int minorVersion, HFileSystem hfs, Path path) 
  throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, 
        minorVersion, hfs, path);

  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have 
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }

  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Project: HBase-Research    File: TestCacheOnWrite.java
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
      BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Project: hbase-0.94.8-qod    File: StoreFile.java
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
    CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
    boolean closeIStream) throws IOException {
  super(path);

  FSDataInputStream in = hfileLink.open(fs);
  FSDataInputStream inNoChecksum = in;
  if (fs instanceof HFileSystem) {
    FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
    inNoChecksum = hfileLink.open(noChecksumFs);
  }

  reader = HFile.createReaderWithEncoding(fs, path, in, inNoChecksum,
              size, cacheConf, preferredEncodingInCache, closeIStream);
  bloomFilterType = BloomType.NONE;
}