Example source code for the Java class org.apache.hadoop.hdfs.util.ByteArrayManager

Project: hadoop    File: ClientContext.java
private ClientContext(String name, Conf conf) {
  this.name = name;
  this.confString = confAsString(conf);
  this.shortCircuitCache = new ShortCircuitCache(
      conf.shortCircuitStreamsCacheSize,
      conf.shortCircuitStreamsCacheExpiryMs,
      conf.shortCircuitMmapCacheSize,
      conf.shortCircuitMmapCacheExpiryMs,
      conf.shortCircuitMmapCacheRetryTimeout,
      conf.shortCircuitCacheStaleThresholdMs,
      conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
  this.peerCache =
        new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
  this.keyProviderCache = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
  this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
  this.domainSocketFactory = new DomainSocketFactory(conf);

  this.byteArrayManager = ByteArrayManager.newInstance(conf.writeByteArrayManagerConf);
}
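
The ClientContext constructor above only wires a ByteArrayManager into the HDFS client; the manager itself is used through its newByteArray/release pair, as the releaseBuffer helpers further down show. The standalone sketch below illustrates that allocate/release cycle. It is an illustration only: the ByteArrayManager.Conf arguments (count threshold, count limit, count reset period in ms) are assumed example values, not values taken from any snippet on this page.

import org.apache.hadoop.hdfs.util.ByteArrayManager;

public class ByteArrayManagerExample {
  public static void main(String[] args) throws InterruptedException {
    // Assumed Conf arguments: count threshold, count limit, count reset period (ms).
    // Passing null to newInstance instead is expected to disable pooling entirely.
    ByteArrayManager bam = ByteArrayManager.newInstance(
        new ByteArrayManager.Conf(128, 2048, 10_000L));

    // The returned array is at least as long as requested; the call may block
    // (hence InterruptedException) when the count limit for that length is reached.
    byte[] buf = bam.newByteArray(64 * 1024);
    try {
      // ... fill the buffer, e.g. with packet data ...
    } finally {
      // Hand the array back, mirroring the releaseBuffer helpers shown below.
      bam.release(buf);
    }
  }
}
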
Project: aliyun-oss-hadoop-fs    File: ClientContext.java
private ClientContext(String name, DfsClientConf conf) {
  final ShortCircuitConf scConf = conf.getShortCircuitConf();

  this.name = name;
  this.confString = scConf.confAsString();
  this.shortCircuitCache = ShortCircuitCache.fromConf(scConf);
  this.peerCache = new PeerCache(scConf.getSocketCacheCapacity(),
      scConf.getSocketCacheExpiry());
  this.keyProviderCache = new KeyProviderCache(
      scConf.getKeyProviderCacheExpiryMs());
  this.useLegacyBlockReaderLocal = scConf.isUseLegacyBlockReaderLocal();
  this.domainSocketFactory = new DomainSocketFactory(scConf);

  this.byteArrayManager = ByteArrayManager.newInstance(
      conf.getWriteByteArrayManagerConf());
}
Project: aliyun-oss-hadoop-fs    File: DataStreamer.java
private DataStreamer(HdfsFileStatus stat, ExtendedBlock block,
                     DFSClient dfsClient, String src,
                     Progressable progress, DataChecksum checksum,
                     AtomicReference<CachingStrategy> cachingStrategy,
                     ByteArrayManager byteArrayManage,
                     boolean isAppend, String[] favoredNodes) {
  this.block = block;
  this.dfsClient = dfsClient;
  this.src = src;
  this.progress = progress;
  this.stat = stat;
  this.checksum4WriteBlock = checksum;
  this.cachingStrategy = cachingStrategy;
  this.byteArrayManager = byteArrayManage;
  this.isLazyPersistFile = isLazyPersist(stat);
  this.isAppend = isAppend;
  this.favoredNodes = favoredNodes;

  final DfsClientConf conf = dfsClient.getConf();
  this.dfsclientSlowLogThresholdMs = conf.getSlowIoWarningThresholdMs();
  this.excludedNodes = initExcludedNodes(conf.getExcludedNodesCacheExpiry());
  this.errorState = new ErrorState(conf.getDatanodeRestartTimeout());
}
Project: big-c    File: ClientContext.java
private ClientContext(String name, Conf conf) {
  this.name = name;
  this.confString = confAsString(conf);
  this.shortCircuitCache = new ShortCircuitCache(
      conf.shortCircuitStreamsCacheSize,
      conf.shortCircuitStreamsCacheExpiryMs,
      conf.shortCircuitMmapCacheSize,
      conf.shortCircuitMmapCacheExpiryMs,
      conf.shortCircuitMmapCacheRetryTimeout,
      conf.shortCircuitCacheStaleThresholdMs,
      conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
  this.peerCache =
        new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
  this.keyProviderCache = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
  this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
  this.domainSocketFactory = new DomainSocketFactory(conf);

  this.byteArrayManager = ByteArrayManager.newInstance(conf.writeByteArrayManagerConf);
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClientContext.java
private ClientContext(String name, Conf conf) {
  this.name = name;
  this.confString = confAsString(conf);
  this.shortCircuitCache = new ShortCircuitCache(
      conf.shortCircuitStreamsCacheSize,
      conf.shortCircuitStreamsCacheExpiryMs,
      conf.shortCircuitMmapCacheSize,
      conf.shortCircuitMmapCacheExpiryMs,
      conf.shortCircuitMmapCacheRetryTimeout,
      conf.shortCircuitCacheStaleThresholdMs,
      conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
  this.peerCache =
        new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
  this.keyProviderCache = new KeyProviderCache(conf.keyProviderCacheExpiryMs);
  this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
  this.domainSocketFactory = new DomainSocketFactory(conf);

  this.byteArrayManager = ByteArrayManager.newInstance(conf.writeByteArrayManagerConf);
}
Project: FlexMap    File: ClientContext.java
private ClientContext(String name, Conf conf) {
  this.name = name;
  this.confString = confAsString(conf);
  this.shortCircuitCache = new ShortCircuitCache(
      conf.shortCircuitStreamsCacheSize,
      conf.shortCircuitStreamsCacheExpiryMs,
      conf.shortCircuitMmapCacheSize,
      conf.shortCircuitMmapCacheExpiryMs,
      conf.shortCircuitMmapCacheRetryTimeout,
      conf.shortCircuitCacheStaleThresholdMs,
      conf.shortCircuitSharedMemoryWatcherInterruptCheckMs);
  this.peerCache =
        new PeerCache(conf.socketCacheCapacity, conf.socketCacheExpiry);
  this.useLegacyBlockReaderLocal = conf.useLegacyBlockReaderLocal;
  this.domainSocketFactory = new DomainSocketFactory(conf);

  this.byteArrayManager = ByteArrayManager.newInstance(conf.writeByteArrayManagerConf);
}
Project: aliyun-oss-hadoop-fs    File: DataStreamer.java
/**
 * Release a list of packets to the ByteArrayManager.
 *
 * @param packets the packets whose buffers are to be released
 * @param bam the ByteArrayManager to release the buffers to
 */
private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam) {
  for(DFSPacket p : packets) {
    p.releaseBuffer(bam);
  }
  packets.clear();
}
Project: aliyun-oss-hadoop-fs    File: DataStreamer.java
/**
 * Construct a data streamer for writing a new block
 * (the pipeline stage is set to PIPELINE_SETUP_CREATE).
 */
DataStreamer(HdfsFileStatus stat, ExtendedBlock block, DFSClient dfsClient,
             String src, Progressable progress, DataChecksum checksum,
             AtomicReference<CachingStrategy> cachingStrategy,
             ByteArrayManager byteArrayManage, String[] favoredNodes) {
  this(stat, block, dfsClient, src, progress, checksum, cachingStrategy,
      byteArrayManage, false, favoredNodes);
  stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
}
Project: aliyun-oss-hadoop-fs    File: DataStreamer.java
/**
 * Construct a data streamer for appending to the last partial block
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 */
DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat, DFSClient dfsClient,
             String src, Progressable progress, DataChecksum checksum,
             AtomicReference<CachingStrategy> cachingStrategy,
             ByteArrayManager byteArrayManage) {
  this(stat, lastBlock.getBlock(), dfsClient, src, progress, checksum, cachingStrategy,
      byteArrayManage, true, null);
  stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
  bytesSent = block.getNumBytes();
  accessToken = lastBlock.getBlockToken();
}
Project: aliyun-oss-hadoop-fs    File: StripedDataStreamer.java
StripedDataStreamer(HdfsFileStatus stat,
                    DFSClient dfsClient, String src,
                    Progressable progress, DataChecksum checksum,
                    AtomicReference<CachingStrategy> cachingStrategy,
                    ByteArrayManager byteArrayManage, String[] favoredNodes,
                    short index, Coordinator coordinator) {
  super(stat, null, dfsClient, src, progress, checksum, cachingStrategy,
      byteArrayManage, favoredNodes);
  this.index = index;
  this.coordinator = coordinator;
}
Project: hadoop    File: DFSOutputStream.java
private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam) {
  for (DFSPacket p : packets) {
    p.releaseBuffer(bam);
  }
  packets.clear();
}
Project: hadoop    File: ClientContext.java
public ByteArrayManager getByteArrayManager() {
  return byteArrayManager;
}
Project: aliyun-oss-hadoop-fs    File: ClientContext.java
public ByteArrayManager getByteArrayManager() {
  return byteArrayManager;
}
Project: aliyun-oss-hadoop-fs    File: DfsClientConf.java
/**
 * @return the writeByteArrayManagerConf
 */
public ByteArrayManager.Conf getWriteByteArrayManagerConf() {
  return writeByteArrayManagerConf;
}
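
The writeByteArrayManagerConf returned above is assembled elsewhere in DfsClientConf from the client configuration. The following is a hedged sketch of how such a Conf could be built from a Hadoop Configuration; the dfs.client.write.byte-array-manager.* property names and the default values are assumptions based on common HDFS client naming and should be verified against your Hadoop version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.util.ByteArrayManager;

public class WriteByteArrayManagerConfSketch {
  // Returns null when the manager is disabled; ByteArrayManager.newInstance(null)
  // then falls back to plain allocation without pooling.
  static ByteArrayManager.Conf buildConf(Configuration conf) {
    // Assumed property names and default values.
    if (!conf.getBoolean("dfs.client.write.byte-array-manager.enabled", false)) {
      return null;
    }
    return new ByteArrayManager.Conf(
        conf.getInt("dfs.client.write.byte-array-manager.count-threshold", 128),
        conf.getInt("dfs.client.write.byte-array-manager.count-limit", 2048),
        conf.getLong(
            "dfs.client.write.byte-array-manager.count-reset-time-period-ms", 10_000L));
  }
}
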
Project: aliyun-oss-hadoop-fs    File: DFSPacket.java
/**
 * Release the buffer in this packet to ByteArrayManager.
 */
synchronized void releaseBuffer(ByteArrayManager bam) {
  bam.release(buf);
  buf = null;
}
Project: big-c    File: DFSOutputStream.java
private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam) {
  for (DFSPacket p : packets) {
    p.releaseBuffer(bam);
  }
  packets.clear();
}
Project: big-c    File: ClientContext.java
public ByteArrayManager getByteArrayManager() {
  return byteArrayManager;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSOutputStream.java
private synchronized void releaseBuffer(ByteArrayManager bam) {
  bam.release(buf);
  buf = null;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSOutputStream.java
private static void releaseBuffer(List<Packet> packets, ByteArrayManager bam) {
  for(Packet p : packets) {
    p.releaseBuffer(bam);
  }
  packets.clear();
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClientContext.java
public ByteArrayManager getByteArrayManager() {
  return byteArrayManager;
}
Project: FlexMap    File: DFSOutputStream.java
private void releaseBuffer(ByteArrayManager bam) {
  bam.release(buf);
  buf = null;
}
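
The single-buffer releaseBuffer variants above all follow the same pattern: the packet owns a byte[] obtained from the manager, hands it back once, and nulls the field so the array is not reused after release. Below is a minimal, hypothetical holder class illustrating that pattern (not the actual DFSPacket/Packet implementation, and with an explicit null guard added against double release).

import org.apache.hadoop.hdfs.util.ByteArrayManager;

/** Hypothetical buffer holder mirroring the releaseBuffer(bam) pattern above. */
class PooledBuffer {
  private byte[] buf;

  PooledBuffer(ByteArrayManager bam, int size) throws InterruptedException {
    this.buf = bam.newByteArray(size); // returned array length >= size
  }

  synchronized void releaseBuffer(ByteArrayManager bam) {
    if (buf != null) {
      bam.release(buf); // return the array to the manager (a no-op if pooling is disabled)
      buf = null;       // guard against releasing the same array twice
    }
  }
}
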
Project: FlexMap    File: ClientContext.java
public ByteArrayManager getByteArrayManager() {
  return byteArrayManager;
}
Project: hadoop    File: DFSPacket.java
/**
 * Release the buffer in this packet to ByteArrayManager.
 *
 * @param bam the ByteArrayManager to release the buffer to
 */
synchronized void releaseBuffer(ByteArrayManager bam) {
  bam.release(buf);
  buf = null;
}
Project: big-c    File: DFSPacket.java
/**
 * Release the buffer in this packet to ByteArrayManager.
 *
 * @param bam the ByteArrayManager to release the buffer to
 */
synchronized void releaseBuffer(ByteArrayManager bam) {
  bam.release(buf);
  buf = null;
}