Java 类 org.apache.hadoop.hdfs.nfs.mount.Mountd 实例源码

项目:hadoop    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  final String exportPoint = "/myexport1";

  NfsConfiguration config = new NfsConfiguration();
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  // Bind mountd and the NFS server to ephemeral ports so parallel test
  // runs do not collide on fixed port numbers.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the mini cluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon should expose exactly the configured export.
    RpcProgramMountd rpcMount =
        (RpcProgramMountd) nfsServer.getMountd().getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    assertTrue(rpcMount.getExports().get(0).equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  final String exportPoint = "/myexport1";

  NfsConfiguration config = new NfsConfiguration();
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  // Use ephemeral ports so concurrently running tests do not clash on
  // fixed mountd/NFS port numbers.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Start the NFS gateway on top of the mini cluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // Verify the mount daemon advertises exactly the configured export.
    RpcProgramMountd rpcMount =
        (RpcProgramMountd) nfsServer.getMountd().getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    assertTrue(rpcMount.getExports().get(0).equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:big-c    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  final String exportPoint = "/myexport1";

  NfsConfiguration config = new NfsConfiguration();
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  // Ephemeral ports keep parallel test executions from fighting over the
  // default mountd/NFS ports.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Launch the NFS gateway.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // Exactly one export — the one we configured — must be registered.
    RpcProgramMountd rpcMount =
        (RpcProgramMountd) nfsServer.getMountd().getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    assertTrue(rpcMount.getExports().get(0).equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  final String exportPoint = "/myexport1";

  NfsConfiguration config = new NfsConfiguration();
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  // Pick ephemeral ports in case several test JVMs run side by side.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Spin up the NFS gateway.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must list the single configured export point.
    RpcProgramMountd rpcMount =
        (RpcProgramMountd) nfsServer.getMountd().getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    assertTrue(rpcMount.getExports().get(0).equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hops    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  final String exportPoint = "/myexport1";

  Configuration config = new Configuration();
  config.setStrings(Nfs3Constant.EXPORT_POINT, exportPoint);
  // Ephemeral ports avoid collisions when tests run in parallel.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Start the NFS gateway against the mini cluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // Exactly the configured export should be visible via mountd.
    RpcProgramMountd rpcMount =
        (RpcProgramMountd) nfsServer.getMountd().getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    assertTrue(rpcMount.getExports().get(0).equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-on-lustre2    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  final String exportPoint = "/myexport1";

  Configuration config = new Configuration();
  config.setStrings(Nfs3Constant.EXPORT_POINT, exportPoint);
  // Use ephemeral ports so parallel test runs do not contend for the
  // default mountd/NFS server ports.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring the NFS gateway online.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // mountd must expose exactly the one configured export point.
    RpcProgramMountd rpcMount =
        (RpcProgramMountd) nfsServer.getMountd().getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    assertTrue(rpcMount.getExports().get(0).equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop    文件:Nfs3.java   
/**
 * Creates the NFSv3 gateway service together with its MOUNT daemon.
 *
 * @param conf gateway configuration, forwarded to both RPC programs
 * @param registrationSocket socket handed through to the RPC programs —
 *        presumably used for portmap registration; confirm in
 *        RpcProgramNfs3/Mountd
 * @param allowInsecurePorts flag forwarded unchanged to both RPC programs;
 *        its semantics are defined there, not here
 * @throws IOException if constructing the NFS3 RPC program fails
 */
public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
    boolean allowInsecurePorts) throws IOException {
  super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
      allowInsecurePorts), conf);
  // The mount daemon shares the same configuration, registration socket
  // and port policy as the NFS program itself.
  mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
}
项目:hadoop    文件:Nfs3.java   
/**
 * Returns the MOUNT protocol daemon held by this NFS3 service.
 *
 * @return the {@code mountd} instance created in the constructor
 */
public Mountd getMountd() {
  return mountd;
}
项目:aliyun-oss-hadoop-fs    文件:Nfs3.java   
/**
 * Creates the NFSv3 gateway service together with its MOUNT daemon.
 *
 * @param conf gateway configuration, forwarded to both RPC programs
 * @param registrationSocket socket handed through to the RPC programs —
 *        presumably used for portmap registration; confirm in
 *        RpcProgramNfs3/Mountd
 * @param allowInsecurePorts flag forwarded unchanged to both RPC programs;
 *        its semantics are defined there, not here
 * @throws IOException if constructing the NFS3 RPC program fails
 */
public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
    boolean allowInsecurePorts) throws IOException {
  super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
      allowInsecurePorts), conf);
  // The mount daemon shares the same configuration, registration socket
  // and port policy as the NFS program itself.
  mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
}
项目:aliyun-oss-hadoop-fs    文件:Nfs3.java   
/**
 * Returns the MOUNT protocol daemon held by this NFS3 service.
 *
 * @return the {@code mountd} instance created in the constructor
 */
public Mountd getMountd() {
  return mountd;
}
项目:big-c    文件:Nfs3.java   
/**
 * Creates the NFSv3 gateway service together with its MOUNT daemon.
 *
 * @param conf gateway configuration, forwarded to both RPC programs
 * @param registrationSocket socket handed through to the RPC programs —
 *        presumably used for portmap registration; confirm in
 *        RpcProgramNfs3/Mountd
 * @param allowInsecurePorts flag forwarded unchanged to both RPC programs;
 *        its semantics are defined there, not here
 * @throws IOException if constructing the NFS3 RPC program fails
 */
public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
    boolean allowInsecurePorts) throws IOException {
  super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
      allowInsecurePorts), conf);
  // The mount daemon shares the same configuration, registration socket
  // and port policy as the NFS program itself.
  mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
}
项目:big-c    文件:Nfs3.java   
/**
 * Returns the MOUNT protocol daemon held by this NFS3 service.
 *
 * @return the {@code mountd} instance created in the constructor
 */
public Mountd getMountd() {
  return mountd;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:Nfs3.java   
/**
 * Creates the NFSv3 gateway service together with its MOUNT daemon.
 *
 * @param conf gateway configuration, forwarded to both RPC programs
 * @param registrationSocket socket handed through to the RPC programs —
 *        presumably used for portmap registration; confirm in
 *        RpcProgramNfs3/Mountd
 * @param allowInsecurePorts flag forwarded unchanged to both RPC programs;
 *        its semantics are defined there, not here
 * @throws IOException if constructing the NFS3 RPC program fails
 */
public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
    boolean allowInsecurePorts) throws IOException {
  super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
      allowInsecurePorts), conf);
  // The mount daemon shares the same configuration, registration socket
  // and port policy as the NFS program itself.
  mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:Nfs3.java   
/**
 * Returns the MOUNT protocol daemon held by this NFS3 service.
 *
 * @return the {@code mountd} instance created in the constructor
 */
public Mountd getMountd() {
  return mountd;
}
项目:hadoop-plus    文件:Nfs3.java   
/**
 * Creates the NFSv3 service for the given export list with default
 * configuration.
 *
 * @param exports paths to export, forwarded to both the MOUNT daemon and
 *        the NFS3 RPC program
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(List<String> exports) throws IOException {
  super(new Mountd(exports), new RpcProgramNfs3(exports));
}
项目:hadoop-plus    文件:Nfs3.java   
/**
 * Creates the NFSv3 service for the given export list with an explicit
 * configuration.
 *
 * @param exports paths to export, forwarded to both the MOUNT daemon and
 *        the NFS3 RPC program
 * @param config configuration forwarded to both RPC programs
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(List<String> exports, Configuration config) throws IOException {
  super(new Mountd(exports, config), new RpcProgramNfs3(exports, config));
}
项目:hops    文件:Nfs3.java   
/**
 * Creates the NFSv3 service and its MOUNT daemon from the given
 * configuration.
 *
 * @param conf configuration forwarded to both RPC programs
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(Configuration conf) throws IOException {
  super(new RpcProgramNfs3(conf), conf);
  // The mount daemon is driven by the same configuration object.
  mountd = new Mountd(conf);
}
项目:hops    文件:Nfs3.java   
/**
 * Returns the MOUNT protocol daemon held by this NFS3 service.
 *
 * @return the {@code mountd} instance created in the constructor
 */
public Mountd getMountd() {
  return mountd;
}
项目:hadoop-TCP    文件:Nfs3.java   
/**
 * Creates the NFSv3 service for the given export list with default
 * configuration.
 *
 * <p>Note: in this variant the export list is handed only to the MOUNT
 * daemon; the NFS3 RPC program is constructed without it.
 *
 * @param exports paths to export via the MOUNT daemon
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(List<String> exports) throws IOException {
  super(new Mountd(exports), new RpcProgramNfs3());
}
项目:hadoop-TCP    文件:Nfs3.java   
/**
 * Creates the NFSv3 service for the given export list with an explicit
 * configuration.
 *
 * <p>Note: the export list is handed only to the MOUNT daemon; the NFS3
 * RPC program receives just the configuration.
 *
 * @param exports paths to export via the MOUNT daemon
 * @param config configuration forwarded to both RPC programs
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(List<String> exports, Configuration config) throws IOException {
  super(new Mountd(exports, config), new RpcProgramNfs3(config), config);
}
项目:hardfs    文件:Nfs3.java   
/**
 * Creates the NFSv3 service for the given export list with default
 * configuration.
 *
 * <p>Note: in this variant the export list is handed only to the MOUNT
 * daemon; the NFS3 RPC program is constructed without it.
 *
 * @param exports paths to export via the MOUNT daemon
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(List<String> exports) throws IOException {
  super(new Mountd(exports), new RpcProgramNfs3());
}
项目:hardfs    文件:Nfs3.java   
/**
 * Creates the NFSv3 service for the given export list with an explicit
 * configuration.
 *
 * <p>Note: the export list is handed only to the MOUNT daemon; the NFS3
 * RPC program receives just the configuration.
 *
 * @param exports paths to export via the MOUNT daemon
 * @param config configuration forwarded to both RPC programs
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(List<String> exports, Configuration config) throws IOException {
  super(new Mountd(exports, config), new RpcProgramNfs3(config), config);
}
项目:hadoop-on-lustre2    文件:Nfs3.java   
/**
 * Creates the NFSv3 service and its MOUNT daemon from the given
 * configuration.
 *
 * @param conf configuration forwarded to both RPC programs
 * @throws IOException if either RPC program cannot be created
 */
public Nfs3(Configuration conf) throws IOException {
  super(new RpcProgramNfs3(conf), conf);
  // The mount daemon is driven by the same configuration object.
  mountd = new Mountd(conf);
}
项目:hadoop-on-lustre2    文件:Nfs3.java   
/**
 * Returns the MOUNT protocol daemon held by this NFS3 service.
 *
 * @return the {@code mountd} instance created in the constructor
 */
public Mountd getMountd() {
  return mountd;
}