Java Class org.apache.hadoop.hdfs.server.namenode.JspHelper Example Source Code
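The snippets below, collected from several Hadoop-derived projects (hadoop-on-lustre, hortonworks-extension, hadoop-EAR, RDFS), show the typical uses of JspHelper: storing and retrieving the shared Configuration under JspHelper.CURRENT_CONF, building delegation-token URL parameters, obtaining a DFSClient on behalf of an HTTP request, and choosing a datanode for WebHDFS operations.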

Project: hadoop-on-lustre    File: HdfsProxy.java
private void initialize(Configuration conf) throws IOException {
  sslAddr = getSslAddr(conf);
  String nn = conf.get("hdfsproxy.dfs.namenode.address");
  if (nn == null)
    throw new IOException("HDFS NameNode address is not specified");
  InetSocketAddress nnAddr = NetUtils.createSocketAddr(nn);
  LOG.info("HDFS NameNode is at: " + nnAddr.getHostName() + ":" + nnAddr.getPort());

  Configuration sslConf = new Configuration(false);
  sslConf.addResource(conf.get("hdfsproxy.https.server.keystore.resource",
      "ssl-server.xml"));
  // unit testing
  sslConf.set("proxy.http.test.listener.addr",
              conf.get("proxy.http.test.listener.addr"));

  this.server = new ProxyHttpServer(sslAddr, sslConf);
  this.server.setAttribute("proxy.https.port", server.getPort());
  this.server.setAttribute("name.node.address", nnAddr);
  this.server.setAttribute(JspHelper.CURRENT_CONF, new Configuration());
  this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
  this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
  this.server.addServlet("data", "/data/*", ProxyFileDataServlet.class);
  this.server.addServlet("streamFile", "/streamFile/*", ProxyStreamFile.class);
}
Project: hortonworks-extension    File: HdfsProxy.java
private void initialize(Configuration conf) throws IOException {
  sslAddr = getSslAddr(conf);
  String nn = conf.get("hdfsproxy.dfs.namenode.address");
  if (nn == null)
    throw new IOException("HDFS NameNode address is not specified");
  InetSocketAddress nnAddr = NetUtils.createSocketAddr(nn);
  LOG.info("HDFS NameNode is at: " + nnAddr.getHostName() + ":" + nnAddr.getPort());

  Configuration sslConf = new Configuration(false);
  sslConf.addResource(conf.get("hdfsproxy.https.server.keystore.resource",
      "ssl-server.xml"));
  // unit testing
  sslConf.set("proxy.http.test.listener.addr",
              conf.get("proxy.http.test.listener.addr"));

  this.server = new ProxyHttpServer(sslAddr, sslConf);
  this.server.setAttribute("proxy.https.port", server.getPort());
  this.server.setAttribute("name.node.address", nnAddr);
  this.server.setAttribute(JspHelper.CURRENT_CONF, new Configuration());
  this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
  this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
  this.server.addServlet("data", "/data/*", ProxyFileDataServlet.class);
  this.server.addServlet("streamFile", "/streamFile/*", ProxyStreamFile.class);
}
Project: hadoop-EAR    File: JournalNodeHttpServer.java
void start() throws IOException {
  final InetSocketAddress bindAddr = JournalConfigHelper.getAddress(conf);

  // initialize the webserver for uploading/downloading files.
  LOG.info("Starting web server at: " + bindAddr);

  int tmpInfoPort = bindAddr.getPort();

  this.httpServer = new HttpServer("qjm", bindAddr.getAddress().getHostAddress(), tmpInfoPort,
      tmpInfoPort == 0, conf);

  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class);
  httpServer.addInternalServlet("getJournalManifest", "/getJournalManifest",
      GetJournalManifestServlet.class);
  httpServer.addInternalServlet("journalStats", "/journalStats",
      JournalStatsServlet.class);
  httpServer.addInternalServlet("uploadImage", "/uploadImage",
      UploadImageServlet.class);
  httpServer.addInternalServlet("getImage", "/getImage",
      GetJournalImageServlet.class);
  httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  this.infoPort = httpServer.getPort();
  this.httpAddress = new InetSocketAddress(bindAddr.getAddress(), infoPort);

  LOG.info("Journal Web-server up at: " + bindAddr + ":" + infoPort);
}
Project: hadoop-on-lustre    File: ProxyStreamFile.java
@Override
protected DFSClient getDFSClient(HttpServletRequest request) throws IOException, InterruptedException {
  ServletContext context = getServletContext();
  Configuration conf = (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
  UserGroupInformation ugi = getUGI(request, conf);
  final InetSocketAddress nameNodeAddr = (InetSocketAddress) context.getAttribute("name.node.address");

  return JspHelper.getDFSClient(ugi, nameNodeAddr, conf);
}
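A minimal usage sketch for the method above, assuming the same Hadoop-1.x-era APIs (java.io.InputStream and org.apache.hadoop.io.IOUtils imports); the file path and buffer size are illustrative, not taken from the project:

// Hypothetical doGet body built on getDFSClient(); "/user/demo/data.bin"
// and the 4096-byte buffer are made-up values.
DFSClient dfs = getDFSClient(request);
InputStream in = dfs.open("/user/demo/data.bin");
try {
  // copy the HDFS stream to the servlet response; the 'true' flag closes both streams
  IOUtils.copyBytes(in, response.getOutputStream(), 4096, true);
} finally {
  dfs.close();
}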
Project: hadoop-on-lustre    File: ProxyFileDataServlet.java
/** {@inheritDoc} */
@Override
protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request, String dt) throws IOException,
    URISyntaxException {

  String dtParam="";
  if (dt != null) {
    dtParam=JspHelper.getDelegationTokenUrlParam(dt);
  }

  return new URI(request.getScheme(), null, request.getServerName(), request
      .getServerPort(), "/streamFile" + i.getFullName(parent),
       "&ugi=" + ugi.getShortUserName() + dtParam, null);
}
Project: hadoop-on-lustre    File: UserProvider.java
@Override
public UserGroupInformation getValue(final HttpContext context) {
  final Configuration conf = (Configuration) servletcontext
      .getAttribute(JspHelper.CURRENT_CONF);
  try {
    return JspHelper.getUGI(servletcontext, request, conf,
        AuthenticationMethod.KERBEROS, false);
  } catch (IOException e) {
    throw new SecurityException(
        "Failed to obtain user group information: " + e, e);
  }
}
Project: hadoop-on-lustre    File: WebHdfsFileSystem.java
private String addDt2Query(String query) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    synchronized (this) {
      if (delegationToken != null) {
        final String encoded = delegationToken.encodeToUrlString();
        return query + JspHelper.getDelegationTokenUrlParam(encoded);
      } // else we are talking to an insecure cluster
    }
  }
  return query;
}
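For context, a hedged sketch of how a caller inside the same class might use addDt2Query; JspHelper.getDelegationTokenUrlParam appends the delegation parameter only when security is enabled, and the host, port, path, and op values below are illustrative (the ugi field is assumed to be the file system's UserGroupInformation):

// Hypothetical caller; none of the literal values are taken from the project.
String query = "op=OPEN&user.name=" + ugi.getShortUserName();
query = addDt2Query(query);  // appends "&delegation=<encoded token>" on a secure cluster
URL url = new URL("http", "nn.example.com", 50070, "/webhdfs/v1/user/demo/file?" + query);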
Project: hadoop-on-lustre    File: HftpFileSystem.java
protected String updateQuery(String query) throws IOException {
  String tokenString = null;
  if (UserGroupInformation.isSecurityEnabled()) {
    synchronized (this) {
      if (delegationToken == null && !remoteIsInsecure) {
        initDelegationToken();
      }    
      if (delegationToken != null) {
        tokenString = delegationToken.encodeToUrlString();
        return (query + JspHelper.getDelegationTokenUrlParam(tokenString));
      } // else we are talking to an insecure cluster
    }
  }
  return query;
}
Project: hadoop-on-lustre    File: NamenodeWebHdfsMethods.java
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize) throws IOException {
  final FSNamesystem ns = namenode.getNamesystem();

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client
    final DatanodeInfo dn = ns.chooseDatanode(getRemoteAddress(), blocksize);
    if (dn != null) {
      return dn;
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica
    final HdfsFileStatus status = namenode.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = namenode.getBlockLocations(
          path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return JspHelper.bestNode(locations.get(0));
      }
    }
  } 

  return ns.getRandomDatanode();
}
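In this method, CREATE favors a datanode near the client, OPEN/GETFILECHECKSUM/APPEND favor a datanode holding a replica of the relevant block via JspHelper.bestNode, and anything else falls back to a random datanode. A minimal follow-up sketch, assuming DatanodeInfo exposes getHostName() and getInfoPort() accessors in this code base; the redirect layout is illustrative:

// Hypothetical caller: derive the host:port of a WebHDFS redirect target
// from the datanode chosen above.
DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset, blocksize);
String redirectHostPort = dn.getHostName() + ":" + dn.getInfoPort();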
Project: hortonworks-extension    File: ProxyStreamFile.java
@Override
protected DFSClient getDFSClient(HttpServletRequest request) throws IOException, InterruptedException {
  ServletContext context = getServletContext();
  Configuration conf = (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
  UserGroupInformation ugi = getUGI(request, conf);
  final InetSocketAddress nameNodeAddr = (InetSocketAddress) context.getAttribute("name.node.address");

  return JspHelper.getDFSClient(ugi, nameNodeAddr, conf);
}
Project: hortonworks-extension    File: ProxyFileDataServlet.java
/** {@inheritDoc} */
@Override
protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request, String dt) throws IOException,
    URISyntaxException {

  String dtParam="";
  if (dt != null) {
    dtParam=JspHelper.getDelegationTokenUrlParam(dt);
  }

  return new URI(request.getScheme(), null, request.getServerName(), request
      .getServerPort(), "/streamFile" + i.getFullName(parent),
       "&ugi=" + ugi.getShortUserName() + dtParam, null);
}
Project: hortonworks-extension    File: UserProvider.java
@Override
public UserGroupInformation getValue(final HttpContext context) {
  final Configuration conf = (Configuration) servletcontext
      .getAttribute(JspHelper.CURRENT_CONF);
  try {
    return JspHelper.getUGI(servletcontext, request, conf,
        AuthenticationMethod.KERBEROS, false);
  } catch (IOException e) {
    throw new SecurityException(
        "Failed to obtain user group information: " + e, e);
  }
}
Project: hortonworks-extension    File: WebHdfsFileSystem.java
private String addDt2Query(String query) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    synchronized (this) {
      if (delegationToken != null) {
        final String encoded = delegationToken.encodeToUrlString();
        return query + JspHelper.getDelegationTokenUrlParam(encoded);
      } // else we are talking to an insecure cluster
    }
  }
  return query;
}
Project: hortonworks-extension    File: HftpFileSystem.java
protected String updateQuery(String query) throws IOException {
  String tokenString = null;
  if (UserGroupInformation.isSecurityEnabled()) {
    synchronized (this) {
      if (delegationToken == null && !remoteIsInsecure) {
        initDelegationToken();
      }    
      if (delegationToken != null) {
        tokenString = delegationToken.encodeToUrlString();
        return (query + JspHelper.getDelegationTokenUrlParam(tokenString));
      } // else we are talking to an insecure cluster
    }
  }
  return query;
}
Project: hortonworks-extension    File: NamenodeWebHdfsMethods.java
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize) throws IOException {
  final FSNamesystem ns = namenode.getNamesystem();

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client
    final DatanodeInfo dn = ns.chooseDatanode(getRemoteAddress(), blocksize);
    if (dn != null) {
      return dn;
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica
    final HdfsFileStatus status = namenode.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = namenode.getBlockLocations(
          path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return JspHelper.bestNode(locations.get(0));
      }
    }
  } 

  return ns.getRandomDatanode();
}
Project: hadoop-EAR    File: TestMissingBlocksAlert.java
public void testMissingBlocksAlert() throws IOException, 
                                     InterruptedException {

  MiniDFSCluster cluster = null;

  try {
    Configuration conf = new Configuration();
    //minimize test delay
    conf.setInt("dfs.replication.interval", 0);
    int fileLen = 10*1024;

    //start a cluster with single datanode
    cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();

    DistributedFileSystem dfs = 
                          (DistributedFileSystem) cluster.getFileSystem();

    // create a normal file
    DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"), 
                           fileLen, (short)3, 0);

    Path corruptFile = new Path("/testMissingBlocks/corruptFile");
    DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short)3, 0);


    // Corrupt the block
    assertTrue(TestDatanodeBlockScanner.corruptReplica(
        DFSTestUtil.getFirstBlock(dfs, corruptFile), 0, cluster));

    // read the file so that the corrupt block is reported to NN
    FSDataInputStream in = dfs.open(corruptFile); 
    try {
      in.readFully(new byte[fileLen]);
      fail("Did not get a checksum exception");
    } catch (ChecksumException ignored) { // checksum error is expected.      
    }
    in.close();

    LOG.info("Waiting for missing blocks count to increase...");

    while (dfs.getMissingBlocksCount() <= 0) {
      Thread.sleep(100);
    }
    assertTrue(dfs.getMissingBlocksCount() == 1);


    // Now verify that it shows up on webui
    URL url = new URL("http://" + conf.get("dfs.http.address") + 
                      "/dfshealth.jsp");
    String dfsFrontPage = DFSTestUtil.urlGet(url);
    String warnStr = JspHelper.getMissingBlockWarningText(1);
    assertTrue("HDFS Front page does not contain expected warning", 
               dfsFrontPage.contains(warnStr));

    // now do the reverse : remove the file expect the number of missing 
    // blocks to go to zero

    dfs.delete(corruptFile, true);

    LOG.info("Waiting for missing blocks count to be zero...");
    while (dfs.getMissingBlocksCount() > 0) {
      Thread.sleep(100);
    }

    // and make sure WARNING disappears
    // Now verify that it shows up on webui
    dfsFrontPage = DFSTestUtil.urlGet(url);
    assertFalse("HDFS Front page contains unexpected warning", 
                dfsFrontPage.contains(warnStr));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Project: hadoop-EAR    File: TestRaidUI.java
@Test
public void testRaidUI() throws Exception {
  Configuration localConf = new Configuration(conf);
  cnode = RaidNode.createRaidNode(null, localConf);
  InetSocketAddress infoSocAddr = dfsCluster.getNameNode().getHttpAddress();
  InjectionHandler h = new TestRaidHTTPInjectionHandler();
  InjectionHandler.set(h);

  LOG.info("First call will fail with timeout because RaidNode UI will " +  
      "hang for 10 seconds. Check TestRaidHTTPInjectionHandler when "  +
      "counter == 1");
  long stime = System.currentTimeMillis();
  String httpContent = DFSUtil.getHTMLContent(
      new URI("http", null, infoSocAddr.getHostName(), 
          infoSocAddr.getPort(), "/dfshealth.jsp", null, null));
  LOG.info("Output1: " + httpContent);
  long duration = System.currentTimeMillis() - stime;
  long expectTimeout = JspHelper.RAID_UI_CONNECT_TIMEOUT +
      JspHelper.RAID_UI_READ_TIMEOUT;
  assertTrue("Should take less than " + expectTimeout + "ms actual time: " +
      duration, duration < expectTimeout + 1000);
  assertTrue("Should get timeout error", 
      httpContent.contains("Raidnode didn't response"));
  assertFalse("Shouldn't get right result", 
      httpContent.contains("WARNING Corrupt files"));

  LOG.info("Second call will fail with error because RaidNode UI throw " + 
      "an IOException. Check TestRaidHTTPInjectionHandler when counter == 2");
  httpContent = DFSUtil.getHTMLContent(
      new URI("http", null, infoSocAddr.getHostName(), 
          infoSocAddr.getPort(), "/dfshealth.jsp", null, null));
  LOG.info("Output2: " + httpContent);
  assertTrue("Should get error", 
      httpContent.contains("Raidnode is unreachable"));
  assertFalse("Shouldn't get right result", 
      httpContent.contains("WARNING Corrupt files"));

  LOG.info("Third call will succeed");
  httpContent = DFSUtil.getHTMLContent(
      new URI("http", null, infoSocAddr.getHostName(), 
          infoSocAddr.getPort(), "/dfshealth.jsp", null, null));
  LOG.info("Output3: " + httpContent);
  assertTrue("Should get right result", 
      httpContent.contains("WARNING Corrupt files"));
}
Project: hadoop-EAR    File: GetJournalImageServlet.java
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  try {
    final GetImageParams parsedParams = new GetImageParams(request, response);

    // here we only support getImage
    if (!parsedParams.isGetImage()) {
      throw new IOException("Only getImage requests are supported");
    }

    // get access to journal node storage
    final ServletContext context = getServletContext();
    final Configuration conf = (Configuration) getServletContext()
        .getAttribute(JspHelper.CURRENT_CONF);
    final String journalId = request.getParameter(JOURNAL_ID_PARAM);
    QuorumJournalManager.checkJournalId(journalId);

    final Journal journal = JournalNodeHttpServer.getJournalFromContext(
        context, journalId);
    final JNStorage imageStorage = journal.getImageStorage();

    final JournalMetrics metrics = journal.getMetrics();
    if (metrics != null) {
      metrics.numGetImageDoGet.inc();
    }

    // Check that the namespace info is correct
    if (!GetJournalEditServlet.checkStorageInfoOrSendError(imageStorage,
        request, response)) {
      return;
    }

    // we will serve image at txid
    long txid = parsedParams.getTxId();
    File imageFile = imageStorage.getImageFile(txid);

    // no such image in the storage
    if (imageFile == null) {
      throw new IOException("Could not find image with txid " + txid);
    }

    // set verification headers 
    setVerificationHeaders(response, imageFile);

    // send fsImage
    TransferFsImage.getFileServer(response.getOutputStream(), imageFile,
        GetImageServlet.getThrottler(conf, parsedParams.isThrottlerDisabled()));

  } catch (Throwable t) {
    GetJournalEditServlet.handleFailure(t, response, "getImage");
  } 
}
Project: hadoop-EAR    File: JournalNodeHttpServer.java
public static Configuration getConfFromContext(ServletContext context) {
  return (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
}
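A minimal sketch of how a servlet registered on this HttpServer could use the helper above, assuming javax.servlet.http, java.io.IOException, and org.apache.hadoop.conf.Configuration imports; the servlet class name and the config key are illustrative:

// Hypothetical servlet, not part of the project: recovers the shared
// Configuration stored under JspHelper.CURRENT_CONF and reads one value.
public class ExampleJournalInfoServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    Configuration conf =
        JournalNodeHttpServer.getConfFromContext(getServletContext());
    resp.getWriter().println("io.file.buffer.size = "
        + conf.getInt("io.file.buffer.size", 4096));
  }
}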
Project: RDFS    File: TestMissingBlocksAlert.java
public void testMissingBlocksAlert() throws IOException, 
                                     InterruptedException {

  MiniDFSCluster cluster = null;

  try {
    Configuration conf = new Configuration();
    //minimize test delay
    conf.setInt("dfs.replication.interval", 0);
    int fileLen = 10*1024;

    //start a cluster with single datanode
    cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();

    DistributedFileSystem dfs = 
                          (DistributedFileSystem) cluster.getFileSystem();

    // create a normal file
    DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"), 
                           fileLen, (short)3, 0);

    Path corruptFile = new Path("/testMissingBlocks/corruptFile");
    DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short)3, 0);


    // Corrupt the block
    String block = DFSTestUtil.getFirstBlock(dfs, corruptFile).getBlockName();
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0, cluster));

    // read the file so that the corrupt block is reported to NN
    FSDataInputStream in = dfs.open(corruptFile); 
    try {
      in.readFully(new byte[fileLen]);
      fail("Did not get a checksum exception");
    } catch (ChecksumException ignored) { // checksum error is expected.      
    }
    in.close();

    LOG.info("Waiting for missing blocks count to increase...");

    while (dfs.getMissingBlocksCount() <= 0) {
      Thread.sleep(100);
    }
    assertTrue(dfs.getMissingBlocksCount() == 1);


    // Now verify that it shows up on webui
    URL url = new URL("http://" + conf.get("dfs.http.address") + 
                      "/dfshealth.jsp");
    String dfsFrontPage = DFSTestUtil.urlGet(url);
    String warnStr = JspHelper.getMissingBlockWarningText(1);
    assertTrue("HDFS Front page does not contain expected warning", 
               dfsFrontPage.contains(warnStr));

    // now do the reverse : remove the file expect the number of missing 
    // blocks to go to zero

    dfs.delete(corruptFile, true);

    LOG.info("Waiting for missing blocks count to be zero...");
    while (dfs.getMissingBlocksCount() > 0) {
      Thread.sleep(100);
    }

    // and make sure WARNING disappears
    // Now verify that it shows up on webui
    dfsFrontPage = DFSTestUtil.urlGet(url);
    assertFalse("HDFS Front page contains unexpected warning", 
                dfsFrontPage.contains(warnStr));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}