Java 类org.apache.hadoop.hdfs.web.JsonUtil 实例源码

项目:hadoop    文件:WebHdfsHandler.java   
/**
 * Serves a WebHDFS GETFILECHECKSUM request for {@code path}: fetches the
 * MD5-of-MD5-of-CRC32 checksum through a short-lived DFSClient, serializes
 * it to JSON, and writes it back as a complete HTTP response before closing
 * the connection.
 *
 * @param ctx channel context used to emit the HTTP response
 * @throws IOException if checksum retrieval or client shutdown fails
 */
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  final String nnId = params.namenodeId();
  DFSClient client = newDfsClient(nnId, conf);
  MD5MD5CRC32FileChecksum checksum = null;
  try {
    // Length bound of Long.MAX_VALUE => checksum the whole file.
    checksum = client.getFileChecksum(path, Long.MAX_VALUE);
    client.close();
    client = null; // prevent a double close in the finally block
  } finally {
    // Best-effort cleanup when getFileChecksum or close threw.
    IOUtils.cleanup(LOG, client);
  }

  final byte[] body = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  final DefaultFullHttpResponse response =
      new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(body));
  response.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  response.headers().set(CONTENT_LENGTH, body.length);
  response.headers().set(CONNECTION, CLOSE);
  // Tear the connection down once the payload is fully written.
  ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
}
项目:hadoop    文件:FSImageHandler.java   
/**
 * Converts any pipeline exception into a JSON error response and closes the
 * connection. IllegalArgumentException maps to 400 and FileNotFoundException
 * to 404; everything else is reported as 500.
 *
 * @param ctx   channel context used to send the error response
 * @param cause the exception raised while handling the request
 */
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
        throws Exception {
  // Wrap non-Exception throwables so they serialize uniformly.
  Exception e = cause instanceof Exception ? (Exception) cause : new
    Exception(cause);
  final String output = JsonUtil.toJsonString(e);
  ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
  final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
          HTTP_1_1, INTERNAL_SERVER_ERROR, content);

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  if (e instanceof IllegalArgumentException) {
    resp.setStatus(BAD_REQUEST);
  } else if (e instanceof FileNotFoundException) {
    resp.setStatus(NOT_FOUND);
  }

  resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
  resp.headers().set(CONNECTION, CLOSE);
  // writeAndFlush (not write): an unflushed response can sit in Netty's
  // outbound buffer indefinitely, so the client never sees the error and
  // the CLOSE listener (fired on write completion) never runs.
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
项目:aliyun-oss-hadoop-fs    文件:WebHdfsHandler.java   
/**
 * Handles a WebHDFS GETFILECHECKSUM request: computes the MD5-of-MD5-of-CRC32
 * checksum for {@code path} via a short-lived DFSClient and writes it back as
 * a JSON HTTP response, closing the connection afterwards.
 *
 * @param ctx the Netty channel context to write the response to
 * @throws IOException if the checksum computation or client close fails
 */
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  MD5MD5CRC32FileChecksum checksum = null;
  final String nnId = params.namenodeId();
  DFSClient dfsclient = newDfsClient(nnId, conf);
  try {
    // Long.MAX_VALUE: checksum the entire file.
    checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
    dfsclient.close();
    // Null out so the finally block does not close twice on success.
    dfsclient = null;
  } finally {
    // Best-effort cleanup if getFileChecksum or close threw.
    IOUtils.cleanup(LOG, dfsclient);
  }
  final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
    new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  resp.headers().set(CONNECTION, CLOSE);
  // Close the channel once the response has been fully written.
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
项目:aliyun-oss-hadoop-fs    文件:FSImageHandler.java   
/**
 * Translates a pipeline exception into a JSON error response and closes the
 * connection: IllegalArgumentException -> 400, FileNotFoundException -> 404,
 * other IOExceptions -> 403, anything else -> 500.
 *
 * @param ctx   channel context used to send the error response
 * @param cause exception raised while handling the request
 */
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
        throws Exception {
  // Wrap bare Throwables so everything serializes uniformly as an Exception.
  Exception e = cause instanceof Exception ? (Exception) cause : new
    Exception(cause);
  final String output = JsonUtil.toJsonString(e);
  ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
  final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
          HTTP_1_1, INTERNAL_SERVER_ERROR, content);

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  if (e instanceof IllegalArgumentException) {
    resp.setStatus(BAD_REQUEST);
  } else if (e instanceof FileNotFoundException) {
    resp.setStatus(NOT_FOUND);
  } else if (e instanceof IOException) {
    resp.setStatus(FORBIDDEN);
  }
  resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
  resp.headers().set(CONNECTION, CLOSE);
  // writeAndFlush (not write): an unflushed response can sit in Netty's
  // outbound buffer indefinitely, so the client never sees the error and
  // the CLOSE listener (fired on write completion) never runs.
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
项目:big-c    文件:WebHdfsHandler.java   
/**
 * Handles a WebHDFS GETFILECHECKSUM request: computes the MD5-of-MD5-of-CRC32
 * checksum for {@code path} via a short-lived DFSClient and writes it back as
 * a JSON HTTP response, closing the connection afterwards.
 *
 * @param ctx the Netty channel context to write the response to
 * @throws IOException if the checksum computation or client close fails
 */
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  MD5MD5CRC32FileChecksum checksum = null;
  final String nnId = params.namenodeId();
  DFSClient dfsclient = newDfsClient(nnId, conf);
  try {
    // Long.MAX_VALUE: checksum the entire file.
    checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
    dfsclient.close();
    // Null out so the finally block does not close twice on success.
    dfsclient = null;
  } finally {
    // Best-effort cleanup if getFileChecksum or close threw.
    IOUtils.cleanup(LOG, dfsclient);
  }
  final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
    new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  resp.headers().set(CONNECTION, CLOSE);
  // Close the channel once the response has been fully written.
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
项目:big-c    文件:FSImageHandler.java   
/**
 * Converts any pipeline exception into a JSON error response and closes the
 * connection. IllegalArgumentException maps to 400 and FileNotFoundException
 * to 404; everything else is reported as 500.
 *
 * @param ctx   channel context used to send the error response
 * @param cause the exception raised while handling the request
 */
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
        throws Exception {
  // Wrap non-Exception throwables so they serialize uniformly.
  Exception e = cause instanceof Exception ? (Exception) cause : new
    Exception(cause);
  final String output = JsonUtil.toJsonString(e);
  ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
  final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
          HTTP_1_1, INTERNAL_SERVER_ERROR, content);

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  if (e instanceof IllegalArgumentException) {
    resp.setStatus(BAD_REQUEST);
  } else if (e instanceof FileNotFoundException) {
    resp.setStatus(NOT_FOUND);
  }

  resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
  resp.headers().set(CONNECTION, CLOSE);
  // writeAndFlush (not write): an unflushed response can sit in Netty's
  // outbound buffer indefinitely, so the client never sees the error and
  // the CLOSE listener (fired on write completion) never runs.
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
项目:hadoop-plus    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request for {@code fullpath}; only the DELETE
 * operation is handled and its outcome is returned to the client as a
 * {"boolean": ...} JSON document. The identity parameters (ugi, delegation,
 * username, doAsUser) belong to the common WebHDFS method signature and are
 * not read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode) context.getAttribute("name.node");

  switch (op.getValue()) {
    case DELETE: {
      final boolean deleted =
          namenode.getRpcServer().delete(fullpath, recursive.getValue());
      final String json = JsonUtil.toJsonString("boolean", deleted);
      return Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hops    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request for {@code fullpath}; only the DELETE
 * operation is supported and its outcome is returned as {"boolean": ...}
 * JSON. Identity parameters are part of the shared WebHDFS signature and
 * are not read here.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the namenode RPC fails
 */
private Response delete(final UserGroupInformation ugi,
    final DelegationParam delegation, final UserParam username,
    final DoAsParam doAsUser, final String fullpath, final DeleteOpParam op,
    final RecursiveParam recursive) throws IOException {
  // NameNode instance is published in the servlet context as "name.node".
  final NameNode namenode = (NameNode) context.getAttribute("name.node");

  switch (op.getValue()) {
  case DELETE:
  {
    final boolean deleted =
        namenode.getRpcServer().delete(fullpath, recursive.getValue());
    return Response.ok(JsonUtil.toJsonString("boolean", deleted))
        .type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hadoop-TCP    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request for {@code fullpath}. Only the DELETE
 * operation is handled; the RPC result is returned as a {"boolean": ...}
 * JSON document. The identity parameters (ugi, delegation, username,
 * doAsUser) are part of the common WebHDFS method signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");

  switch(op.getValue()) {
  case DELETE:
  {
    final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hardfs    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request for {@code fullpath}. Only the DELETE
 * operation is handled; the RPC result is returned as a {"boolean": ...}
 * JSON document. The identity parameters (ugi, delegation, username,
 * doAsUser) are part of the common WebHDFS method signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");

  switch(op.getValue()) {
  case DELETE:
  {
    final boolean b = namenode.getRpcServer().delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hadoop-on-lustre2    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request for {@code fullpath}. Only the DELETE
 * operation is handled; the RPC result is returned as a {"boolean": ...}
 * JSON document. The identity parameters (ugi, delegation, username,
 * doAsUser) are part of the common WebHDFS method signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");

  switch(op.getValue()) {
  case DELETE:
  {
    final boolean b = getRPCServer(namenode).delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hadoop    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP POST request: APPEND redirects the client to a datanode
 * for the actual write, CONCAT merges the given source files into
 * {@code fullpath}, and TRUNCATE cuts the file to {@code newLength},
 * returning its outcome as a {"boolean": ...} JSON document.
 *
 * @throws UnsupportedOperationException for any other PostOpParam value
 * @throws IOException if the namenode RPC fails
 * @throws URISyntaxException if the datanode redirect URI is malformed
 */
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case APPEND:
  {
    // -1L/-1L: no offset or length hint for the redirect target.
    final URI uri = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT:
  {
    np.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE:
  {
    // We treat each rest request as a separate client.
    final boolean b = np.truncate(fullpath, newLength.getValue(), 
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hadoop    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request: DELETE removes {@code fullpath}
 * (optionally recursively) and returns a {"boolean": ...} JSON body, while
 * DELETESNAPSHOT drops the named snapshot and returns an empty 200.
 * Identity parameters belong to the shared WebHDFS signature and are not
 * read here.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the underlying namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive,
    final SnapshotNameParam snapshotName
    ) throws IOException {
  // NameNode instance is published in the servlet context as "name.node".
  final NameNode namenode = (NameNode) context.getAttribute("name.node");
  final NamenodeProtocols rpc = getRPCServer(namenode);

  switch (op.getValue()) {
    case DELETE: {
      final boolean deleted = rpc.delete(fullpath, recursive.getValue());
      return Response.ok(JsonUtil.toJsonString("boolean", deleted))
          .type(MediaType.APPLICATION_JSON).build();
    }
    case DELETESNAPSHOT: {
      rpc.deleteSnapshot(fullpath, snapshotName.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hadoop    文件:FSImageLoader.java   
/**
 * Return the JSON formatted XAttrs of the specified file.
 *
 * @param path a path specifying a file
 * @param names the xattr names to select; null or empty selects all
 * @param encoder the requested value encoding
 * @return JSON formatted XAttrs
 * @throws IOException if a requested attribute is missing or serialization
 *           to JSON fails
 */
String getXAttrs(String path, List<String> names, String encoder)
        throws IOException {
  final List<XAttr> all = getXAttrList(path);
  final List<XAttr> selected;
  if (names == null || names.isEmpty()) {
    // No filter requested: report every attribute of the file.
    selected = all;
  } else {
    selected = Lists.newArrayListWithCapacity(names.size());
    for (String name : names) {
      final XAttr wanted = XAttrHelper.buildXAttr(name);
      // Linear scan: match on namespace and name.
      XAttr match = null;
      for (XAttr candidate : all) {
        if (candidate.getNameSpace() == wanted.getNameSpace()
                && candidate.getName().equals(wanted.getName())) {
          match = candidate;
          break;
        }
      }
      if (match == null) {
        throw new IOException(
                "At least one of the attributes provided was not found.");
      }
      selected.add(match);
    }
  }
  return JsonUtil.toJsonString(selected,
          new XAttrEncodingParam(encoder).getEncoding());
}
项目:hadoop    文件:FSImageLoader.java   
/**
 * Builds the JSON-formatted ACL status of the specified file.
 *
 * @param path a path specifying a file
 * @return JSON serialization of the file's AclStatus
 * @throws IOException if the status cannot be serialized to JSON
 */
String getAclStatus(String path) throws IOException {
  final PermissionStatus permStatus = getPermissionStatus(path);
  final FsPermission perm = permStatus.getPermission();
  // Assemble owner/group, ACL entries, mode bits and sticky bit.
  final AclStatus.Builder aclBuilder = new AclStatus.Builder()
      .owner(permStatus.getUserName())
      .group(permStatus.getGroupName())
      .addEntries(getAclEntryList(path))
      .setPermission(perm)
      .stickyBit(perm.getStickyBit());
  return JsonUtil.toJsonString(aclBuilder.build());
}
项目:aliyun-oss-hadoop-fs    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request: DELETE removes {@code fullpath}
 * (optionally recursively) and returns a {"boolean": ...} JSON body, while
 * DELETESNAPSHOT drops the named snapshot and returns an empty 200.
 * Identity parameters are part of the shared WebHDFS signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the underlying namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive,
    final SnapshotNameParam snapshotName
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case DELETE: {
    final boolean b = np.delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  case DELETESNAPSHOT: {
    np.deleteSnapshot(fullpath, snapshotName.getValue());
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:aliyun-oss-hadoop-fs    文件:FSImageLoader.java   
/**
 * Return the JSON formatted XAttrs of the specified file.
 *
 * @param path
 *          a path specifies a file
 * @param names
 *          xattr names to select; null or empty selects all attributes
 * @param encoder
 *          the requested value encoding
 * @return JSON formatted XAttrs
 * @throws IOException
 *           if a requested attribute is missing or serialization to JSON
 *           fails.
 */
String getXAttrs(String path, List<String> names, String encoder)
    throws IOException {

  List<XAttr> xAttrs = getXAttrList(path);
  List<XAttr> filtered;
  if (names == null || names.size() == 0) {
    // No filter requested: report every attribute of the file.
    filtered = xAttrs;
  } else {
    filtered = Lists.newArrayListWithCapacity(names.size());
    for (String name : names) {
      XAttr search = XAttrHelper.buildXAttr(name);

      // Linear scan for a matching namespace + name pair.
      boolean found = false;
      for (XAttr aXAttr : xAttrs) {
        if (aXAttr.getNameSpace() == search.getNameSpace()
            && aXAttr.getName().equals(search.getName())) {

          filtered.add(aXAttr);
          found = true;
          break;
        }
      }

      if (!found) {
        throw new IOException(
            "At least one of the attributes provided was not found.");
      }
    }

  }
  return JsonUtil.toJsonString(filtered,
      new XAttrEncodingParam(encoder).getEncoding());
}
项目:aliyun-oss-hadoop-fs    文件:FSImageLoader.java   
/**
 * Return the JSON formatted ACL status of the specified file.
 * @param path a path specifies a file
 * @return JSON formatted AclStatus
 * @throws IOException if failed to serialize fileStatus to JSON.
 */
String getAclStatus(String path) throws IOException {
  PermissionStatus p = getPermissionStatus(path);
  List<AclEntry> aclEntryList = getAclEntryList(path);
  FsPermission permission = p.getPermission();
  // Combine owner/group, ACL entries, mode bits and sticky bit into one
  // AclStatus before serializing.
  AclStatus.Builder builder = new AclStatus.Builder();
  builder.owner(p.getUserName()).group(p.getGroupName())
      .addEntries(aclEntryList).setPermission(permission)
      .stickyBit(permission.getStickyBit());
  AclStatus aclStatus = builder.build();
  return JsonUtil.toJsonString(aclStatus);
}
项目:aliyun-oss-hadoop-fs    文件:TestOfflineImageViewerForXAttr.java   
/**
 * Create a populated namespace for later testing. Save its contents to a data
 * structure and store its fsimage location. We only want to generate the
 * fsimage file once and use it for multiple tests.
 *
 * @throws IOException if the mini cluster or fsimage setup fails
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    // Create a name space with XAttributes
    Path dir = new Path("/dir1");
    hdfs.mkdirs(dir);
    hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
    hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
    // Write results to the fsimage file
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    List<XAttr> attributes = new ArrayList<XAttr>();
    attributes.add(XAttrHelper.buildXAttr("user.attr1", "value1".getBytes()));

    // Snapshot the JSON for attr1 alone, before attr2 is appended below.
    attr1JSon = JsonUtil.toJsonString(attributes, null);

    attributes.add(XAttrHelper.buildXAttr("user.attr2", "value2".getBytes()));

    // Determine the location of the fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    // Always tear the mini cluster down, even on setup failure.
    if (cluster != null)
      cluster.shutdown();
  }
}
项目:big-c    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP POST request: APPEND redirects the client to a datanode
 * for the actual write, CONCAT merges the given source files into
 * {@code fullpath}, and TRUNCATE cuts the file to {@code newLength},
 * returning its outcome as a {"boolean": ...} JSON document.
 *
 * @throws UnsupportedOperationException for any other PostOpParam value
 * @throws IOException if the namenode RPC fails
 * @throws URISyntaxException if the datanode redirect URI is malformed
 */
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case APPEND:
  {
    // -1L/-1L: no offset or length hint for the redirect target.
    final URI uri = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT:
  {
    np.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE:
  {
    // We treat each rest request as a separate client.
    final boolean b = np.truncate(fullpath, newLength.getValue(), 
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:big-c    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request: DELETE removes {@code fullpath}
 * (optionally recursively) and returns a {"boolean": ...} JSON body, while
 * DELETESNAPSHOT drops the named snapshot and returns an empty 200.
 * Identity parameters are part of the shared WebHDFS signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the underlying namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive,
    final SnapshotNameParam snapshotName
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case DELETE: {
    final boolean b = np.delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  case DELETESNAPSHOT: {
    np.deleteSnapshot(fullpath, snapshotName.getValue());
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:big-c    文件:FSImageLoader.java   
/**
 * Return the JSON formatted ACL status of the specified file.
 * @param path a path specifies a file
 * @return JSON formatted AclStatus
 * @throws IOException if failed to serialize fileStatus to JSON.
 */
String getAclStatus(String path) throws IOException {
  PermissionStatus p = getPermissionStatus(path);
  List<AclEntry> aclEntryList = getAclEntryList(path);
  FsPermission permission = p.getPermission();
  // Combine owner/group, ACL entries, mode bits and sticky bit into one
  // AclStatus before serializing.
  AclStatus.Builder builder = new AclStatus.Builder();
  builder.owner(p.getUserName()).group(p.getGroupName())
      .addEntries(aclEntryList).setPermission(permission)
      .stickyBit(permission.getStickyBit());
  AclStatus aclStatus = builder.build();
  return JsonUtil.toJsonString(aclStatus);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request: DELETE removes {@code fullpath}
 * (optionally recursively) and returns a {"boolean": ...} JSON body, while
 * DELETESNAPSHOT drops the named snapshot and returns an empty 200.
 * Identity parameters are part of the shared WebHDFS signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the underlying namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive,
    final SnapshotNameParam snapshotName
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case DELETE: {
    final boolean b = np.delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  case DELETESNAPSHOT: {
    np.deleteSnapshot(fullpath, snapshotName.getValue());
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:FlexMap    文件:NamenodeWebHdfsMethods.java   
/**
 * Serves an HTTP DELETE request: DELETE removes {@code fullpath}
 * (optionally recursively) and returns a {"boolean": ...} JSON body, while
 * DELETESNAPSHOT drops the named snapshot and returns an empty 200.
 * Identity parameters are part of the shared WebHDFS signature and are not
 * read in this body.
 *
 * @throws UnsupportedOperationException for any other operation value
 * @throws IOException if the underlying namenode RPC fails
 */
private Response delete(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final DeleteOpParam op,
    final RecursiveParam recursive,
    final SnapshotNameParam snapshotName
    ) throws IOException {
  // The servlet context holds the NameNode instance under "name.node".
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols np = getRPCServer(namenode);

  switch(op.getValue()) {
  case DELETE: {
    final boolean b = np.delete(fullpath, recursive.getValue());
    final String js = JsonUtil.toJsonString("boolean", b);
    return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
  }
  case DELETESNAPSHOT: {
    np.deleteSnapshot(fullpath, snapshotName.getValue());
    return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:hops    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a StreamingOutput that writes the directory listing of {@code p}
 * as a WebHDFS JSON "FileStatuses" document, fetching further listing
 * segments from the namenode as the stream is written.
 *
 * @param np namenode protocols used to fetch listing segments
 * @param p  path whose listing is streamed
 * @return a StreamingOutput producing the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NamenodeProtocols np,
    final String p) throws IOException {
  // Fetch the first segment eagerly so errors surface before streaming.
  final DirectoryListing first =
      getDirectoryListing(np, p, HdfsFileStatus.EMPTY_NAME);

  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      final PrintWriter out =
          new PrintWriter(new OutputStreamWriter(outstream, Charsets.UTF_8));
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\"" +
          FileStatus.class.getSimpleName() + "\":[");

      // First entry without a leading comma; the rest are comma-prefixed.
      final HdfsFileStatus[] partial = first.getPartialListing();
      if (partial.length > 0) {
        out.print(JsonUtil.toJsonString(partial[0], false));
      }
      for (int i = 1; i < partial.length; i++) {
        out.println(',');
        out.print(JsonUtil.toJsonString(partial[i], false));
      }

      // Page through the remaining listing segments.
      for (DirectoryListing curr = first; curr.hasMore(); ) {
        curr = getDirectoryListing(np, p, curr.getLastName());
        for (HdfsFileStatus s : curr.getPartialListing()) {
          out.println(',');
          out.print(JsonUtil.toJsonString(s, false));
        }
      }

      out.println();
      out.println("]}}");
      // Flush the writer's buffer; the container owns closing outstream.
      out.flush();
    }
  };
}
项目:hadoop-on-lustre    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a StreamingOutput that writes the directory listing of {@code p}
 * as a WebHDFS JSON "FileStatuses" document, fetching further listing
 * segments from the namenode as the stream is written.
 *
 * @param np namenode used to fetch listing segments
 * @param p  path whose listing is streamed
 * @return a StreamingOutput producing the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NameNode np, 
    final String p) throws IOException {
  // Fetch the first segment eagerly so errors surface before streaming.
  final DirectoryListing first = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      // Encode explicitly as UTF-8: the single-argument PrintStream
      // constructor uses the platform default charset, which can corrupt
      // non-ASCII file names in the JSON payload. Note that PrintStream
      // still swallows I/O errors internally (see PrintStream#checkError).
      final PrintStream out = new PrintStream(outstream, false, "UTF-8");
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      // First entry without a leading comma; the rest are comma-prefixed.
      final HdfsFileStatus[] partial = first.getPartialListing();
      if (partial.length > 0) {
        out.print(JsonUtil.toJsonString(partial[0], false));
      }
      for(int i = 1; i < partial.length; i++) {
        out.println(',');
        out.print(JsonUtil.toJsonString(partial[i], false));
      }

      // Page through the remaining listing segments.
      for(DirectoryListing curr = first; curr.hasMore(); ) { 
        curr = getDirectoryListing(np, p, curr.getLastName());
        for(HdfsFileStatus s : curr.getPartialListing()) {
          out.println(',');
          out.print(JsonUtil.toJsonString(s, false));
        }
      }

      out.println();
      out.println("]}}");
      // Flush buffered bytes: PrintStream is not auto-flushing here, and
      // the container (not this writer) closes outstream, so an unflushed
      // buffer would drop the tail of the response.
      out.flush();
    }
  };
}
项目:hortonworks-extension    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a StreamingOutput that writes the directory listing of {@code p}
 * as a WebHDFS JSON "FileStatuses" document, fetching further listing
 * segments from the namenode as the stream is written.
 *
 * @param np namenode used to fetch listing segments
 * @param p  path whose listing is streamed
 * @return a StreamingOutput producing the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NameNode np, 
    final String p) throws IOException {
  // Fetch the first segment eagerly so errors surface before streaming.
  final DirectoryListing first = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      // Encode explicitly as UTF-8: the single-argument PrintStream
      // constructor uses the platform default charset, which can corrupt
      // non-ASCII file names in the JSON payload. Note that PrintStream
      // still swallows I/O errors internally (see PrintStream#checkError).
      final PrintStream out = new PrintStream(outstream, false, "UTF-8");
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      // First entry without a leading comma; the rest are comma-prefixed.
      final HdfsFileStatus[] partial = first.getPartialListing();
      if (partial.length > 0) {
        out.print(JsonUtil.toJsonString(partial[0], false));
      }
      for(int i = 1; i < partial.length; i++) {
        out.println(',');
        out.print(JsonUtil.toJsonString(partial[i], false));
      }

      // Page through the remaining listing segments.
      for(DirectoryListing curr = first; curr.hasMore(); ) { 
        curr = getDirectoryListing(np, p, curr.getLastName());
        for(HdfsFileStatus s : curr.getPartialListing()) {
          out.println(',');
          out.print(JsonUtil.toJsonString(s, false));
        }
      }

      out.println();
      out.println("]}}");
      // Flush buffered bytes: PrintStream is not auto-flushing here, and
      // the container (not this writer) closes outstream, so an unflushed
      // buffer would drop the tail of the response.
      out.flush();
    }
  };
}
项目:hortonworks-extension    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a StreamingOutput that writes the directory listing of {@code p}
 * as a WebHDFS JSON "FileStatuses" document, fetching further listing
 * segments from the namenode as the stream is written.
 *
 * @param np namenode used to fetch listing segments
 * @param p  path whose listing is streamed
 * @return a StreamingOutput producing the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NameNode np, 
    final String p) throws IOException {
  // Fetch the first segment eagerly so errors surface before streaming.
  final DirectoryListing first = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      // Encode explicitly as UTF-8: the single-argument PrintStream
      // constructor uses the platform default charset, which can corrupt
      // non-ASCII file names in the JSON payload. Note that PrintStream
      // still swallows I/O errors internally (see PrintStream#checkError).
      final PrintStream out = new PrintStream(outstream, false, "UTF-8");
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      // First entry without a leading comma; the rest are comma-prefixed.
      final HdfsFileStatus[] partial = first.getPartialListing();
      if (partial.length > 0) {
        out.print(JsonUtil.toJsonString(partial[0], false));
      }
      for(int i = 1; i < partial.length; i++) {
        out.println(',');
        out.print(JsonUtil.toJsonString(partial[i], false));
      }

      // Page through the remaining listing segments.
      for(DirectoryListing curr = first; curr.hasMore(); ) { 
        curr = getDirectoryListing(np, p, curr.getLastName());
        for(HdfsFileStatus s : curr.getPartialListing()) {
          out.println(',');
          out.print(JsonUtil.toJsonString(s, false));
        }
      }

      out.println();
      out.println("]}}");
      // Flush buffered bytes: PrintStream is not auto-flushing here, and
      // the container (not this writer) closes outstream, so an unflushed
      // buffer would drop the tail of the response.
      out.flush();
    }
  };
}
项目:hadoop    文件:ExceptionHandler.java   
/**
 * Maps an exception thrown during WebHDFS request handling to a JSON error
 * response. The exception is first unwrapped (ParamException, Jersey
 * ContainerException, Hadoop RemoteException, SecurityException) and then
 * translated to an HTTP status code.
 *
 * @param e the exception raised while serving the request
 * @return a JSON response carrying the (possibly unwrapped) exception
 */
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GOT EXCEPITION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    // Surface the offending parameter name instead of Jersey's wrapper.
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
        + paramexception.getParameterName() + "\": "
        + e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  if (e instanceof SecurityException) {
    e = toCause(e);
  }

  //Map response status
  // Order matters: specific types are tested before the broad IOException.
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }

  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
项目:hadoop    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a StreamingOutput that writes the directory listing of {@code p}
 * as a WebHDFS JSON "FileStatuses" document, fetching further listing
 * segments from the namenode (under the caller's saved UGI) as the stream
 * is written.
 *
 * @param np namenode protocols used to fetch listing segments
 * @param p  path whose listing is streamed
 * @return a StreamingOutput producing the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NamenodeProtocols np, 
    final String p) throws IOException {
  // allows exceptions like FNF or ACE to prevent http response of 200 for
  // a failure since we can't (currently) return error responses in the
  // middle of a streaming operation
  final DirectoryListing firstDirList = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  // must save ugi because the streaming object will be executed outside
  // the remote user's ugi
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      final PrintWriter out = new PrintWriter(new OutputStreamWriter(
          outstream, Charsets.UTF_8));
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      try {
        // restore remote user's ugi
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws IOException {
            // n counts emitted entries so only the first one is written
            // without a leading comma.
            long n = 0;
            for (DirectoryListing dirList = firstDirList; ;
                 dirList = getDirectoryListing(np, p, dirList.getLastName())
            ) {
              // send each segment of the directory listing
              for (HdfsFileStatus s : dirList.getPartialListing()) {
                if (n++ > 0) {
                  out.println(',');
                }
                out.print(JsonUtil.toJsonString(s, false));
              }
              // stop if last segment
              if (!dirList.hasMore()) {
                break;
              }
            }
            return null;
          }
        });
      } catch (InterruptedException e) {
        // doAs was interrupted; surface it as an I/O failure to the caller.
        throw new IOException(e);
      }

      out.println();
      out.println("]}}");
      out.flush();
    }
  };
}
项目:hadoop    文件:ExceptionHandler.java   
/**
 * Converts an arbitrary Throwable into a JSON error response suitable for
 * WebHDFS clients, unwrapping framework wrappers and mapping exception
 * types to HTTP status codes.
 *
 * @param cause the failure raised while handling the request
 * @return a full HTTP response carrying the exception serialized as JSON
 */
static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
  // Normalize to an Exception so the unwrapping logic below applies uniformly.
  Exception e = cause instanceof Exception ? (Exception) cause : new Exception(cause);

  if (LOG.isTraceEnabled()) {
    // Fixed log-message typo ("EXCEPITION" -> "EXCEPTION"); matches the
    // corrected spelling used elsewhere in this file.
    LOG.trace("GOT EXCEPTION", e);
  }

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
                                       + paramexception.getParameterName() + "\": "
                                       + e.getCause().getMessage(), e);
  } else if (e instanceof ContainerException || e instanceof SecurityException) {
    e = toCause(e);
  } else if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  //Map response status
  final HttpResponseStatus s;
  if (e instanceof SecurityException) {
    s = FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = NOT_FOUND;
  } else if (e instanceof IOException) {
    s = FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = BAD_REQUEST;
  } else {
    // Anything unrecognized is a server-side bug; log at WARN.
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = INTERNAL_SERVER_ERROR;
  }

  final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
    new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js));

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  return resp;
}
项目:aliyun-oss-hadoop-fs    文件:ExceptionHandler.java   
/**
 * JAX-RS exception mapper: unwraps framework/RPC wrappers and converts the
 * exception into a JSON response with an HTTP status derived from its type.
 *
 * Note the unwrapping steps below are deliberately sequential (not
 * else-if): a RemoteException may unwrap to a SecurityException, which the
 * final step then reduces to its cause.
 *
 * @param e the exception raised while handling the request
 * @return a JSON response describing the exception
 */
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    // Fixed log-message typo ("EXCEPITION" -> "EXCEPTION").
    LOG.trace("GOT EXCEPTION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
        + paramexception.getParameterName() + "\": "
        + e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  if (e instanceof SecurityException) {
    e = toCause(e);
  }

  //Map response status
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    // Anything unrecognized is a server-side bug; log at WARN.
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }

  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
项目:aliyun-oss-hadoop-fs    文件:NamenodeWebHdfsMethods.java   
/**
 * Handles WebHDFS POST operations (APPEND, CONCAT, TRUNCATE) for the given
 * resolved path, dispatching on the operation parameter.
 *
 * @throws IOException on namenode RPC failure
 * @throws URISyntaxException if the datanode redirect URI is malformed
 * @throws IllegalArgumentException if TRUNCATE is missing newLength
 * @throws UnsupportedOperationException for any other operation value
 */
private Response post(
    final UserGroupInformation ugi,
    final DelegationParam delegation,
    final UserParam username,
    final DoAsParam doAsUser,
    final String fullpath,
    final PostOpParam op,
    final ConcatSourcesParam concatSrcs,
    final BufferSizeParam bufferSize,
    final ExcludeDatanodesParam excludeDatanodes,
    final NewLengthParam newLength
    ) throws IOException, URISyntaxException {
  final NameNode namenode = (NameNode)context.getAttribute("name.node");
  final NamenodeProtocols namenodeRpc = getRPCServer(namenode);

  switch(op.getValue()) {
  case APPEND: {
    // APPEND is served by a datanode: redirect the client there.
    final URI redirect = redirectURI(namenode, ugi, delegation, username,
        doAsUser, fullpath, op.getValue(), -1L, -1L,
        excludeDatanodes.getValue(), bufferSize);
    return Response.temporaryRedirect(redirect)
        .type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
  case CONCAT: {
    namenodeRpc.concat(fullpath, concatSrcs.getAbsolutePaths());
    return Response.ok().build();
  }
  case TRUNCATE: {
    if (newLength.getValue() == null) {
      throw new IllegalArgumentException(
          "newLength parameter is Missing");
    }
    // We treat each rest request as a separate client.
    final boolean truncated = namenodeRpc.truncate(fullpath,
        newLength.getValue(),
        "DFSClient_" + DFSUtil.getSecureRandom().nextLong());
    final String json = JsonUtil.toJsonString("boolean", truncated);
    return Response.ok(json).type(MediaType.APPLICATION_JSON).build();
  }
  default:
    throw new UnsupportedOperationException(op + " is not supported");
  }
}
项目:aliyun-oss-hadoop-fs    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a streaming JSON response for the directory listing of {@code p}.
 * The listing is fetched from the namenode in segments via
 * {@link #getDirectoryListing} and emitted as
 * {"FileStatuses":{"FileStatus":[...]}}.
 *
 * @param np namenode RPC endpoint used to fetch each listing segment
 * @param p  directory path being listed
 * @return a StreamingOutput that writes the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NamenodeProtocols np, 
    final String p) throws IOException {
  // allows exceptions like FNF or ACE to prevent http response of 200 for
  // a failure since we can't (currently) return error responses in the
  // middle of a streaming operation
  final DirectoryListing firstDirList = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  // must save ugi because the streaming object will be executed outside
  // the remote user's ugi
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      final PrintWriter out = new PrintWriter(new OutputStreamWriter(
          outstream, Charsets.UTF_8));
      // opens the {"FileStatuses":{"FileStatus":[ envelope
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      try {
        // restore remote user's ugi
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws IOException {
            // n counts entries emitted so far, across all segments, so that
            // a comma precedes every entry except the very first
            long n = 0;
            for (DirectoryListing dirList = firstDirList; ;
                 dirList = getDirectoryListing(np, p, dirList.getLastName())
            ) {
              // send each segment of the directory listing
              for (HdfsFileStatus s : dirList.getPartialListing()) {
                if (n++ > 0) {
                  out.println(',');
                }
                out.print(JsonUtil.toJsonString(s, false));
              }
              // stop if last segment
              if (!dirList.hasMore()) {
                break;
              }
            }
            return null;
          }
        });
      } catch (InterruptedException e) {
        throw new IOException(e);
      }

      // closes the JSON envelope opened above
      out.println();
      out.println("]}}");
      out.flush();
    }
  };
}
项目:aliyun-oss-hadoop-fs    文件:ExceptionHandler.java   
/**
 * Converts an arbitrary Throwable into a JSON error response suitable for
 * WebHDFS clients, unwrapping framework wrappers and mapping exception
 * types to HTTP status codes.
 *
 * @param cause the failure raised while handling the request
 * @return a full HTTP response carrying the exception serialized as JSON
 */
static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
  // Normalize to an Exception so the unwrapping logic below applies uniformly.
  Exception e = (cause instanceof Exception)
      ? (Exception) cause
      : new Exception(cause);

  if (LOG.isTraceEnabled()) {
    LOG.trace("GOT EXCEPTION", e);
  }

  // Unwrap framework-level wrappers to expose the underlying failure.
  if (e instanceof ParamException) {
    final ParamException pe = (ParamException) e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
                                       + pe.getParameterName() + "\": "
                                       + e.getCause().getMessage(), e);
  } else if (e instanceof ContainerException || e instanceof SecurityException) {
    e = toCause(e);
  } else if (e instanceof RemoteException) {
    e = ((RemoteException) e).unwrapRemoteException();
  }

  // Map the (unwrapped) exception type to an HTTP status.
  final HttpResponseStatus status;
  if (e instanceof SecurityException || e instanceof AuthorizationException) {
    status = FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    status = NOT_FOUND;
  } else if (e instanceof IOException) {
    status = FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException
      || e instanceof IllegalArgumentException) {
    status = BAD_REQUEST;
  } else {
    // Anything unrecognized is a server-side bug; log at WARN.
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    status = INTERNAL_SERVER_ERROR;
  }

  final byte[] body = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
  final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
      HTTP_1_1, status, Unpooled.wrappedBuffer(body));
  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, body.length);
  return resp;
}
项目:big-c    文件:ExceptionHandler.java   
/**
 * JAX-RS exception mapper: unwraps framework/RPC wrappers and converts the
 * exception into a JSON response with an HTTP status derived from its type.
 *
 * Note the unwrapping steps below are deliberately sequential (not
 * else-if): a RemoteException may unwrap to a SecurityException, which the
 * final step then reduces to its cause.
 *
 * @param e the exception raised while handling the request
 * @return a JSON response describing the exception
 */
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    // Fixed log-message typo ("EXCEPITION" -> "EXCEPTION").
    LOG.trace("GOT EXCEPTION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
        + paramexception.getParameterName() + "\": "
        + e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  if (e instanceof SecurityException) {
    e = toCause(e);
  }

  //Map response status
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    // Anything unrecognized is a server-side bug; log at WARN.
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }

  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
项目:big-c    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a streaming JSON response for the directory listing of {@code p}.
 * The listing is fetched from the namenode in segments via
 * {@link #getDirectoryListing} and emitted as
 * {"FileStatuses":{"FileStatus":[...]}}.
 *
 * @param np namenode RPC endpoint used to fetch each listing segment
 * @param p  directory path being listed
 * @return a StreamingOutput that writes the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NamenodeProtocols np, 
    final String p) throws IOException {
  // allows exceptions like FNF or ACE to prevent http response of 200 for
  // a failure since we can't (currently) return error responses in the
  // middle of a streaming operation
  final DirectoryListing firstDirList = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  // must save ugi because the streaming object will be executed outside
  // the remote user's ugi
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      final PrintWriter out = new PrintWriter(new OutputStreamWriter(
          outstream, Charsets.UTF_8));
      // opens the {"FileStatuses":{"FileStatus":[ envelope
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      try {
        // restore remote user's ugi
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws IOException {
            // n counts entries emitted so far, across all segments, so that
            // a comma precedes every entry except the very first
            long n = 0;
            for (DirectoryListing dirList = firstDirList; ;
                 dirList = getDirectoryListing(np, p, dirList.getLastName())
            ) {
              // send each segment of the directory listing
              for (HdfsFileStatus s : dirList.getPartialListing()) {
                if (n++ > 0) {
                  out.println(',');
                }
                out.print(JsonUtil.toJsonString(s, false));
              }
              // stop if last segment
              if (!dirList.hasMore()) {
                break;
              }
            }
            return null;
          }
        });
      } catch (InterruptedException e) {
        throw new IOException(e);
      }

      // closes the JSON envelope opened above
      out.println();
      out.println("]}}");
      out.flush();
    }
  };
}
项目:big-c    文件:ExceptionHandler.java   
/**
 * Converts an arbitrary Throwable into a JSON error response suitable for
 * WebHDFS clients, unwrapping framework wrappers and mapping exception
 * types to HTTP status codes.
 *
 * @param cause the failure raised while handling the request
 * @return a full HTTP response carrying the exception serialized as JSON
 */
static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
  // Normalize to an Exception so the unwrapping logic below applies uniformly.
  Exception e = cause instanceof Exception ? (Exception) cause : new Exception(cause);

  if (LOG.isTraceEnabled()) {
    // Fixed log-message typo ("EXCEPITION" -> "EXCEPTION"); matches the
    // corrected spelling used elsewhere in this file.
    LOG.trace("GOT EXCEPTION", e);
  }

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
                                       + paramexception.getParameterName() + "\": "
                                       + e.getCause().getMessage(), e);
  } else if (e instanceof ContainerException || e instanceof SecurityException) {
    e = toCause(e);
  } else if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  //Map response status
  final HttpResponseStatus s;
  if (e instanceof SecurityException) {
    s = FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = NOT_FOUND;
  } else if (e instanceof IOException) {
    s = FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = BAD_REQUEST;
  } else {
    // Anything unrecognized is a server-side bug; log at WARN.
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = INTERNAL_SERVER_ERROR;
  }

  final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
    new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js));

  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  return resp;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ExceptionHandler.java   
/**
 * JAX-RS exception mapper: unwraps framework/RPC wrappers and converts the
 * exception into a JSON response with an HTTP status derived from its type.
 *
 * Note the unwrapping steps below are deliberately sequential (not
 * else-if): a RemoteException may unwrap to a SecurityException, which the
 * final step then reduces to its cause.
 *
 * @param e the exception raised while handling the request
 * @return a JSON response describing the exception
 */
@Override
public Response toResponse(Exception e) {
  if (LOG.isTraceEnabled()) {
    // Fixed log-message typo ("EXCEPITION" -> "EXCEPTION").
    LOG.trace("GOT EXCEPTION", e);
  }

  //clear content type
  response.setContentType(null);

  //Convert exception
  if (e instanceof ParamException) {
    final ParamException paramexception = (ParamException)e;
    e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
        + paramexception.getParameterName() + "\": "
        + e.getCause().getMessage(), e);
  }
  if (e instanceof ContainerException) {
    e = toCause(e);
  }
  if (e instanceof RemoteException) {
    e = ((RemoteException)e).unwrapRemoteException();
  }

  if (e instanceof SecurityException) {
    e = toCause(e);
  }

  //Map response status
  final Response.Status s;
  if (e instanceof SecurityException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof AuthorizationException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof FileNotFoundException) {
    s = Response.Status.NOT_FOUND;
  } else if (e instanceof IOException) {
    s = Response.Status.FORBIDDEN;
  } else if (e instanceof UnsupportedOperationException) {
    s = Response.Status.BAD_REQUEST;
  } else if (e instanceof IllegalArgumentException) {
    s = Response.Status.BAD_REQUEST;
  } else {
    // Anything unrecognized is a server-side bug; log at WARN.
    LOG.warn("INTERNAL_SERVER_ERROR", e);
    s = Response.Status.INTERNAL_SERVER_ERROR;
  }

  final String js = JsonUtil.toJsonString(e);
  return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:NamenodeWebHdfsMethods.java   
/**
 * Builds a streaming JSON response for the directory listing of {@code p}.
 * The listing is fetched from the namenode in segments via
 * {@link #getDirectoryListing} and emitted as
 * {"FileStatuses":{"FileStatus":[...]}}.
 *
 * @param np namenode RPC endpoint used to fetch each listing segment
 * @param p  directory path being listed
 * @return a StreamingOutput that writes the JSON listing
 * @throws IOException if the first listing segment cannot be fetched
 */
private static StreamingOutput getListingStream(final NamenodeProtocols np, 
    final String p) throws IOException {
  // allows exceptions like FNF or ACE to prevent http response of 200 for
  // a failure since we can't (currently) return error responses in the
  // middle of a streaming operation
  final DirectoryListing firstDirList = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  // must save ugi because the streaming object will be executed outside
  // the remote user's ugi
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      final PrintWriter out = new PrintWriter(new OutputStreamWriter(
          outstream, Charsets.UTF_8));
      // opens the {"FileStatuses":{"FileStatus":[ envelope
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      try {
        // restore remote user's ugi
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws IOException {
            // n counts entries emitted so far, across all segments, so that
            // a comma precedes every entry except the very first
            long n = 0;
            for (DirectoryListing dirList = firstDirList; ;
                 dirList = getDirectoryListing(np, p, dirList.getLastName())
            ) {
              // send each segment of the directory listing
              for (HdfsFileStatus s : dirList.getPartialListing()) {
                if (n++ > 0) {
                  out.println(',');
                }
                out.print(JsonUtil.toJsonString(s, false));
              }
              // stop if last segment
              if (!dirList.hasMore()) {
                break;
              }
            }
            return null;
          }
        });
      } catch (InterruptedException e) {
        throw new IOException(e);
      }

      // closes the JSON envelope opened above
      out.println();
      out.println("]}}");
      out.flush();
    }
  };
}