Java 类org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator 实例源码

项目:hadoop    文件:TestFsDatasetCache.java   
/**
 * Run testCacheAndUncacheBlock with some failures injected into the mlock
 * call.  This tests the ability of the NameNode to resend commands.
 */
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
  // No need to stash the current cacheManipulator here; the @After
  // hook reinstalls the previous one.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    private final Set<String> seenIdentifiers = new HashSet<String>();

    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      // Set.add returns false when the identifier has been seen before,
      // i.e. this is a retry -- let the retry succeed.
      if (!seenIdentifiers.add(identifier)) {
        LOG.info("mlocking " + identifier);
        return;
      }
      // First attempt for this identifier: fail it so the NameNode
      // has to resend the caching command.
      throw new IOException("injecting IOException during mlock of " +
          identifier);
    }
  });
  testCacheAndUncacheBlock();
}
项目:aliyun-oss-hadoop-fs    文件:TestFsDatasetCache.java   
/**
 * Run testCacheAndUncacheBlock with some failures injected into the mlock
 * call.  This tests the ability of the NameNode to resend commands.
 */
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
  // No need to stash the current cacheManipulator here; the @After
  // hook reinstalls the previous one.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    private final Set<String> seenIdentifiers = new HashSet<String>();

    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      // Set.add returns false when the identifier has been seen before,
      // i.e. this is a retry -- let the retry succeed.
      if (!seenIdentifiers.add(identifier)) {
        LOG.info("mlocking " + identifier);
        return;
      }
      // First attempt for this identifier: fail it so the NameNode
      // has to resend the caching command.
      throw new IOException("injecting IOException during mlock of " +
          identifier);
    }
  });
  testCacheAndUncacheBlock();
}
项目:big-c    文件:TestFsDatasetCache.java   
/**
 * Run testCacheAndUncacheBlock with some failures injected into the mlock
 * call.  This tests the ability of the NameNode to resend commands.
 */
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
  // No need to stash the current cacheManipulator here; the @After
  // hook reinstalls the previous one.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    private final Set<String> seenIdentifiers = new HashSet<String>();

    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      // Set.add returns false when the identifier has been seen before,
      // i.e. this is a retry -- let the retry succeed.
      if (!seenIdentifiers.add(identifier)) {
        LOG.info("mlocking " + identifier);
        return;
      }
      // First attempt for this identifier: fail it so the NameNode
      // has to resend the caching command.
      throw new IOException("injecting IOException during mlock of " +
          identifier);
    }
  });
  testCacheAndUncacheBlock();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestFsDatasetCache.java   
/**
 * Run testCacheAndUncacheBlock with some failures injected into the mlock
 * call.  This tests the ability of the NameNode to resend commands.
 */
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
  // No need to stash the current cacheManipulator here; the @After
  // hook reinstalls the previous one.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    private final Set<String> seenIdentifiers = new HashSet<String>();

    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      // Set.add returns false when the identifier has been seen before,
      // i.e. this is a retry -- let the retry succeed.
      if (!seenIdentifiers.add(identifier)) {
        LOG.info("mlocking " + identifier);
        return;
      }
      // First attempt for this identifier: fail it so the NameNode
      // has to resend the caching command.
      throw new IOException("injecting IOException during mlock of " +
          identifier);
    }
  });
  testCacheAndUncacheBlock();
}
项目:FlexMap    文件:TestFsDatasetCache.java   
/**
 * Run testCacheAndUncacheBlock with some failures injected into the mlock
 * call.  This tests the ability of the NameNode to resend commands.
 */
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
  // No need to stash the current cacheManipulator here; the @After
  // hook reinstalls the previous one.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    private final Set<String> seenIdentifiers = new HashSet<String>();

    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      // Set.add returns false when the identifier has been seen before,
      // i.e. this is a retry -- let the retry succeed.
      if (!seenIdentifiers.add(identifier)) {
        LOG.info("mlocking " + identifier);
        return;
      }
      // First attempt for this identifier: fail it so the NameNode
      // has to resend the caching command.
      throw new IOException("injecting IOException during mlock of " +
          identifier);
    }
  });
  testCacheAndUncacheBlock();
}
项目:hadoop-on-lustre2    文件:TestFsDatasetCache.java   
/**
 * Run testCacheAndUncacheBlock with some failures injected into the mlock
 * call.  This tests the ability of the NameNode to resend commands.
 */
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
  // No need to stash the current cacheManipulator here; the @After
  // hook reinstalls the previous one.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    private final Set<String> seenIdentifiers = new HashSet<String>();

    @Override
    public void mlock(String identifier,
        ByteBuffer mmap, long length) throws IOException {
      // Set.add returns false when the identifier has been seen before,
      // i.e. this is a retry -- let the retry succeed.
      if (!seenIdentifiers.add(identifier)) {
        LOG.info("mlocking " + identifier);
        return;
      }
      // First attempt for this identifier: fail it so the NameNode
      // has to resend the caching command.
      throw new IOException("injecting IOException during mlock of " +
          identifier);
    }
  });
  testCacheAndUncacheBlock();
}
项目:hadoop    文件:TestCacheDirectives.java   
// Per-test fixture: starts a caching-enabled mini-cluster.
@Before
public void setup() throws Exception {
  // Configuration with HDFS centralized caching enabled.
  conf = createCachingConf();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  // Handles to the cluster's client filesystem and NameNode endpoints.
  dfs = cluster.getFileSystem();
  proto = cluster.getNameNodeRpc();
  namenode = cluster.getNameNode();
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Enable trace logging for the HDFS caching code paths.
  BlockReaderTestUtil.enableHdfsCachingTracing();
}
项目:hadoop    文件:TestFsDatasetCache.java   
// Per-test fixture: starts a single-DataNode mini-cluster tuned for caching.
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  // Short refresh/report/heartbeat intervals so cache state changes
  // propagate quickly enough for the test timeouts.
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Cap the DataNode's lockable memory at the test's cache capacity.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  // Handles to the filesystem, NameNode image and the single DataNode's
  // dataset, used throughout the tests.
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  // Spy on the DataNode->NameNode block-pool RPC -- presumably so tests
  // can verify cache-related traffic; TODO confirm against the test bodies.
  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}
项目:hadoop    文件:TestFsDatasetCacheRevocation.java   
// Per-test fixture for cache-revocation tests.
@Before
public void setUp() throws Exception {
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Disable validation of domain-socket bind paths and create a temporary
  // directory to hold the test's sockets.
  DomainSocket.disableBindPathValidation();
  sockDir = new TemporarySocketDirectory();
}
项目:aliyun-oss-hadoop-fs    文件:TestCacheDirectives.java   
// Per-test fixture: starts a caching-enabled mini-cluster.
@Before
public void setup() throws Exception {
  // Configuration with HDFS centralized caching enabled.
  conf = createCachingConf();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  // Handles to the cluster's client filesystem and NameNode endpoints.
  dfs = cluster.getFileSystem();
  proto = cluster.getNameNodeRpc();
  namenode = cluster.getNameNode();
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Enable trace logging for the HDFS caching code paths.
  BlockReaderTestUtil.enableHdfsCachingTracing();
}
项目:aliyun-oss-hadoop-fs    文件:TestFsDatasetCache.java   
// Per-test fixture: starts a single-DataNode mini-cluster tuned for caching.
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  // Short refresh/report/heartbeat intervals so cache state changes
  // propagate quickly enough for the test timeouts.
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Cap the DataNode's lockable memory at the test's cache capacity.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  // Handles to the filesystem, NameNode image and the single DataNode's
  // dataset, used throughout the tests.
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  // Spy on the DataNode->NameNode block-pool RPC -- presumably so tests
  // can verify cache-related traffic; TODO confirm against the test bodies.
  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}
项目:aliyun-oss-hadoop-fs    文件:TestFsDatasetCacheRevocation.java   
// Per-test fixture for cache-revocation tests.
@Before
public void setUp() throws Exception {
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Disable validation of domain-socket bind paths and create a temporary
  // directory to hold the test's sockets.
  DomainSocket.disableBindPathValidation();
  sockDir = new TemporarySocketDirectory();
}
项目:big-c    文件:TestCacheDirectives.java   
// Per-test fixture: starts a caching-enabled mini-cluster.
@Before
public void setup() throws Exception {
  // Configuration with HDFS centralized caching enabled.
  conf = createCachingConf();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  // Handles to the cluster's client filesystem and NameNode endpoints.
  dfs = cluster.getFileSystem();
  proto = cluster.getNameNodeRpc();
  namenode = cluster.getNameNode();
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Enable trace logging for the HDFS caching code paths.
  BlockReaderTestUtil.enableHdfsCachingTracing();
}
项目:big-c    文件:TestFsDatasetCache.java   
// Per-test fixture: starts a single-DataNode mini-cluster tuned for caching.
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  // Short refresh/report/heartbeat intervals so cache state changes
  // propagate quickly enough for the test timeouts.
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Cap the DataNode's lockable memory at the test's cache capacity.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  // Handles to the filesystem, NameNode image and the single DataNode's
  // dataset, used throughout the tests.
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  // Spy on the DataNode->NameNode block-pool RPC -- presumably so tests
  // can verify cache-related traffic; TODO confirm against the test bodies.
  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}
项目:big-c    文件:TestFsDatasetCacheRevocation.java   
// Per-test fixture for cache-revocation tests.
@Before
public void setUp() throws Exception {
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Disable validation of domain-socket bind paths and create a temporary
  // directory to hold the test's sockets.
  DomainSocket.disableBindPathValidation();
  sockDir = new TemporarySocketDirectory();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestCacheDirectives.java   
// Per-test fixture: starts a caching-enabled mini-cluster.
@Before
public void setup() throws Exception {
  // Configuration with HDFS centralized caching enabled.
  conf = createCachingConf();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  // Handles to the cluster's client filesystem and NameNode endpoints.
  dfs = cluster.getFileSystem();
  proto = cluster.getNameNodeRpc();
  namenode = cluster.getNameNode();
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Enable trace logging for the HDFS caching code paths.
  BlockReaderTestUtil.enableHdfsCachingTracing();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestFsDatasetCache.java   
// Per-test fixture: starts a single-DataNode mini-cluster tuned for caching.
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  // Short refresh/report/heartbeat intervals so cache state changes
  // propagate quickly enough for the test timeouts.
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Cap the DataNode's lockable memory at the test's cache capacity.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  // Handles to the filesystem, NameNode image and the single DataNode's
  // dataset, used throughout the tests.
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  // Spy on the DataNode->NameNode block-pool RPC -- presumably so tests
  // can verify cache-related traffic; TODO confirm against the test bodies.
  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestFsDatasetCacheRevocation.java   
// Per-test fixture for cache-revocation tests.
@Before
public void setUp() throws Exception {
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Disable validation of domain-socket bind paths and create a temporary
  // directory to hold the test's sockets.
  DomainSocket.disableBindPathValidation();
  sockDir = new TemporarySocketDirectory();
}
项目:FlexMap    文件:TestCacheDirectives.java   
// Per-test fixture: starts a caching-enabled mini-cluster.
@Before
public void setup() throws Exception {
  // Configuration with HDFS centralized caching enabled.
  conf = createCachingConf();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  // Handles to the cluster's client filesystem and NameNode endpoints.
  dfs = cluster.getFileSystem();
  proto = cluster.getNameNodeRpc();
  namenode = cluster.getNameNode();
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Enable trace logging for the HDFS caching code paths.
  BlockReaderTestUtil.enableHdfsCachingTracing();
}
项目:FlexMap    文件:TestFsDatasetCache.java   
// Per-test fixture: starts a single-DataNode mini-cluster tuned for caching.
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  // Short refresh/report/heartbeat intervals so cache state changes
  // propagate quickly enough for the test timeouts.
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Cap the DataNode's lockable memory at the test's cache capacity.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  // Handles to the filesystem, NameNode image and the single DataNode's
  // dataset, used throughout the tests.
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  // Spy on the DataNode->NameNode block-pool RPC -- presumably so tests
  // can verify cache-related traffic; TODO confirm against the test bodies.
  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}
项目:FlexMap    文件:TestFsDatasetCacheRevocation.java   
// Per-test fixture for cache-revocation tests.
@Before
public void setUp() throws Exception {
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Disable validation of domain-socket bind paths and create a temporary
  // directory to hold the test's sockets.
  DomainSocket.disableBindPathValidation();
  sockDir = new TemporarySocketDirectory();
}
项目:hadoop-on-lustre2    文件:TestCacheDirectives.java   
// Per-test fixture: starts a caching-enabled mini-cluster.
@Before
public void setup() throws Exception {
  // Configuration with HDFS centralized caching enabled.
  conf = createCachingConf();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  cluster.waitActive();
  // Handles to the cluster's client filesystem and NameNode endpoints.
  dfs = cluster.getFileSystem();
  proto = cluster.getNameNodeRpc();
  namenode = cluster.getNameNode();
  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
  // Enable trace logging for the HDFS caching code paths.
  BlockReaderTestUtil.enableHdfsCachingTracing();
}
项目:hadoop-on-lustre2    文件:TestFsDatasetCache.java   
// Per-test fixture: starts a single-DataNode mini-cluster tuned for caching.
@Before
public void setUp() throws Exception {
  // Skip the whole test class on Windows -- presumably because the POSIX
  // caching/mlock paths under test are not available there; TODO confirm.
  assumeTrue(!Path.WINDOWS);
  conf = new HdfsConfiguration();
  // Short refresh/report/heartbeat intervals so cache state changes
  // propagate quickly enough for the test timeouts.
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
  conf.setLong(DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Cap the DataNode's lockable memory at the test's cache capacity.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      CACHE_CAPACITY);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // Remember the active cache manipulator so teardown can restore it, then
  // install NoMlockCacheManipulator, which by its name skips real mlock
  // calls -- presumably so tests run without mlock privileges; TODO confirm.
  prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  // Handles to the filesystem, NameNode image and the single DataNode's
  // dataset, used throughout the tests.
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  fsImage = nn.getFSImage();
  dn = cluster.getDataNodes().get(0);
  fsd = dn.getFSDataset();

  // Spy on the DataNode->NameNode block-pool RPC -- presumably so tests
  // can verify cache-related traffic; TODO confirm against the test bodies.
  spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);

}