Example source code for the Java class org.apache.hadoop.hdfs.web.resources.HttpOpParam

Project: hadoop-2.6.0-cdh5.4.3    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final String nnId,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnId == null) {
    throw new IllegalArgumentException(NamenodeAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token =
        deserializeToken(delegation.getValue(), nnId);
    ugi.addToken(token);
  }
}
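
deserializeToken is a private helper of that class; other forks in this listing (hadoop-plus, hops) inline the equivalent logic. Below is a minimal sketch of such a helper, using only Hadoop APIs that appear in those variants; the class name and the resolved NameNode address are illustrative assumptions, not part of the original file.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;

// Hypothetical stand-in for the private deserializeToken helper: decode the
// URL-safe token string carried by the "delegation" parameter and bind it to
// the NameNode RPC address so later RPC calls can select it.
final class DelegationTokenSketch {
  static Token<DelegationTokenIdentifier> deserialize(
      String urlEncodedToken, InetSocketAddress nnRpcAddr) throws IOException {
    final Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(urlEncodedToken);     // inverse of encodeToUrlString()
    SecurityUtil.setTokenService(token, nnRpcAddr); // service = "host:port"
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    return token;
  }
}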
Project: hadoop-plus    File: WebHdfsFileSystem.java
private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
    final HttpURLConnection conn, boolean unwrapException) throws IOException {
  final int code = conn.getResponseCode();
  if (code != op.getExpectedHttpResponseCode()) {
    final Map<?, ?> m;
    try {
      m = jsonParse(conn, true);
    } catch(Exception e) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage(), e);
    }

    if (m == null) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage());
    } else if (m.get(RemoteException.class.getSimpleName()) == null) {
      return m;
    }

    final RemoteException re = JsonUtil.toRemoteException(m);
    throw unwrapException? toIOException(re): re;
  }
  return null;
}
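
validateResponse keys off the RemoteException entry that WebHDFS places in the JSON error body. Below is a standalone sketch of the same check; using Jackson for parsing is an assumption here (the snippet above goes through the file's own jsonParse helper instead).

import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper; // assumed JSON library

// Sketch of the error contract validateResponse relies on. On an unexpected
// status code, WebHDFS answers with a body like:
//   {"RemoteException":{"exception":"FileNotFoundException",
//                       "javaClassName":"java.io.FileNotFoundException",
//                       "message":"File does not exist: /foo"}}
final class RemoteExceptionSketch {
  @SuppressWarnings("unchecked")
  static void throwIfRemoteException(HttpURLConnection conn)
      throws IOException {
    final Map<String, Object> json = (Map<String, Object>)
        new ObjectMapper().readValue(conn.getErrorStream(), Map.class);
    final Map<String, Object> re =
        (Map<String, Object>) json.get("RemoteException");
    if (re != null) {
      throw new IOException(re.get("exception") + ": " + re.get("message"));
    }
  }
}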
Project: hadoop-plus    File: WebHdfsFileSystem.java
Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
  List<Param<?,?>> authParams = Lists.newArrayList();    
  // Skip adding delegation token for token operations because these
  // operations require authentication.
  Token<?> token = null;
  if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
    token = getDelegationToken();
  }
  if (token != null) {
    authParams.add(new DelegationParam(token.encodeToUrlString()));
  } else {
    UserGroupInformation userUgi = ugi;
    UserGroupInformation realUgi = userUgi.getRealUser();
    if (realUgi != null) { // proxy user
      authParams.add(new DoAsParam(userUgi.getShortUserName()));
      userUgi = realUgi;
    }
    authParams.add(new UserParam(userUgi.getShortUserName()));
  }
  return authParams.toArray(new Param<?,?>[0]);
}
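
Depending on the branch taken, these parameters render into one of three query fragments. A sketch with placeholder values; the parameter names are the ones the DelegationParam, DoAsParam and UserParam classes above emit.

// Query fragments produced by getAuthParameters (placeholder values):
final class AuthParamsSketch {
  static final String TOKEN_CASE  = "delegation=<url-safe-token>";   // secure, cached token
  static final String PROXY_CASE  = "doas=alice&user.name=realuser"; // proxy (doAs) user
  static final String SIMPLE_CASE = "user.name=alice";               // plain user
}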
Project: hadoop-plus    File: WebHdfsFileSystem.java
/**
 * Two-step Create/Append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request with data, using the URL from the Location header.
 *
 * The two-step create/append prevents clients from sending data before the
 * redirect. That problem is normally addressed by the "Expect: 100-continue"
 * header in HTTP/1.1 (see RFC 2616, Section 8.2.3), but some software
 * libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP client) do not
 * implement "Expect: 100-continue" correctly, so the two-step create/append
 * is a temporary workaround for those library bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;

  //Step 2) Submit another HTTP request with data, using the URL from the Location header.
  conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //32 KB chunks
  connect();
  return conn;
}
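
The same handshake can be reproduced against the public WebHDFS REST endpoint with nothing but java.net. A minimal, self-contained sketch; the host, port (50070, the classic NameNode HTTP port), path, and user are placeholders.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// Raw-HTTP sketch of the two-step CREATE. Step 1 sends no body and must not
// auto-follow the 307 redirect; step 2 streams the data to the datanode URL
// taken from the Location header.
public final class TwoStepCreateSketch {
  public static void main(String[] args) throws Exception {
    final URL nn = new URL("http://namenode:50070/webhdfs/v1/tmp/demo.txt"
        + "?op=CREATE&user.name=alice");
    final HttpURLConnection step1 = (HttpURLConnection) nn.openConnection();
    step1.setRequestMethod("PUT");
    step1.setInstanceFollowRedirects(false);   // expect 307, do not follow
    final String location = step1.getHeaderField("Location");
    step1.disconnect();

    final HttpURLConnection step2 =
        (HttpURLConnection) new URL(location).openConnection();
    step2.setRequestMethod("PUT");
    step2.setDoOutput(true);
    step2.setChunkedStreamingMode(32 << 10);   // mirror the 32 KB chunks above
    final OutputStream out = step2.getOutputStream();
    out.write("hello webhdfs".getBytes("UTF-8"));
    out.close();
    System.out.println("status=" + step2.getResponseCode()); // 201 expected
    step2.disconnect();
  }
}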
Project: hadoop-plus    File: WebHdfsFileSystem.java
void getResponse(boolean getJsonAndDisconnect) throws IOException {
  try {
    connect();
    final int code = conn.getResponseCode();
    if (!redirected && op.getRedirect()
        && code != op.getExpectedHttpResponseCode()) {
      final String redirect = conn.getHeaderField("Location");
      json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
          conn, false);
      disconnect();

      checkRetry = false;
      conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
      connect();
    }

    json = validateResponse(op, conn, false);
    if (json == null && getJsonAndDisconnect) {
      json = jsonParse(conn, false);
    }
  } finally {
    if (getJsonAndDisconnect) {
      disconnect();
    }
  }
}
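
The redirect branch above is also what read-style operations exercise: for an op such as OPEN, the NameNode answers 307 with a datanode Location header, which getResponse validates before re-connecting. A raw-HTTP sketch of that flow; host, port, path, and user are placeholders.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// Read-side analogue of the redirect handling in getResponse: op=OPEN answers
// with 307 and a datanode Location header, and the client re-connects there.
final class OpenRedirectSketch {
  static InputStream open() throws Exception {
    HttpURLConnection conn = (HttpURLConnection) new URL(
        "http://namenode:50070/webhdfs/v1/tmp/demo.txt?op=OPEN&user.name=alice")
        .openConnection();
    conn.setInstanceFollowRedirects(false);
    if (conn.getResponseCode() == 307) { // TemporaryRedirectOp's expected code
      final String location = conn.getHeaderField("Location");
      conn.disconnect();
      conn = (HttpURLConnection) new URL(location).openConnection();
    }
    return conn.getInputStream(); // file bytes streamed from the datanode
  }
}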
Project: hadoop-plus    File: WebHdfsFileSystem.java
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(new BufferedOutputStream(
      conn.getOutputStream(), bufferSize), statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
Project: hadoop-plus    File: WebHdfsFileSystem.java
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  statistics.incrementWriteOps(1);

  final HttpOpParam.Op op = PutOpParam.Op.CREATE;
  return new Runner(op, f, 
      new PermissionParam(applyUMask(permission)),
      new OverwriteParam(overwrite),
      new BufferSizeParam(bufferSize),
      new ReplicationParam(replication),
      new BlockSizeParam(blockSize))
    .run()
    .write(bufferSize);
}
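
Callers do not invoke this override directly; it is reached through the generic FileSystem API once a webhdfs:// URI is used. A usage sketch (host and port are placeholders):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Writing a file through WebHDFS via the standard FileSystem facade; the
// create() override above (and twoStepWrite behind it) runs under the hood.
public final class WebHdfsCreateUsage {
  public static void main(String[] args) throws Exception {
    final FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode:50070"), new Configuration());
    final FSDataOutputStream out = fs.create(new Path("/tmp/demo.txt"));
    out.writeBytes("hello\n");
    out.close(); // triggers validateResponse(op, conn, true) in write() above
    fs.close();
  }
}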
Project: hadoop-plus    File: WebHdfsFileSystem.java
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json  = run(op, f);
  final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for(int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>)array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
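
The double unwrap, getSimpleName() + "es" and then getSimpleName(), mirrors the response shape defined by the WebHDFS REST API. An abridged illustration of the JSON being taken apart:

// Abridged LISTSTATUS response body:
//
//   {"FileStatuses": {"FileStatus": [
//     {"pathSuffix": "a.txt", "type": "FILE",      "length": 24, ...},
//     {"pathSuffix": "dir",   "type": "DIRECTORY", "length": 0,  ...}
//   ]}}
//
// json.get("FileStatuses") yields the root map; rootmap.get("FileStatus")
// yields the array that the loop converts into FileStatus objects.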
Project: hadoop-plus    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final InetSocketAddress nnRpcAddr,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnRpcAddr == null) {
    throw new IllegalArgumentException(NamenodeRpcAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token = 
        new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(delegation.getValue());
    SecurityUtil.setTokenService(token, nnRpcAddr);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
Project: FlexMap    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final String nnId,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnId == null) {
    throw new IllegalArgumentException(NamenodeAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token =
        deserializeToken(delegation.getValue(), nnId);
    ugi.addToken(token);
  }
}
Project: hops    File: WebHdfsFileSystem.java
/**
 * Two-step Create/Append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request with data, using the URL from the
 * Location header.
 * <p/>
 * The two-step create/append prevents clients from sending data before the
 * redirect. That problem is normally addressed by the "Expect: 100-continue"
 * header in HTTP/1.1 (see RFC 2616, Section 8.2.3), but some software
 * libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP client) do not
 * implement "Expect: 100-continue" correctly, so the two-step create/append
 * is a temporary workaround for those library bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn,
      false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;

  //Step 2) Submit another HTTP request with data, using the URL from the Location header.
  conn = (HttpURLConnection) new URL(redirect).openConnection();
  conn.setRequestProperty("Content-Type",
      MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //32 KB chunks
  connect();
  return conn;
}
Project: hops    File: WebHdfsFileSystem.java
void getResponse(boolean getJsonAndDisconnect) throws IOException {
  try {
    connect();
    final int code = conn.getResponseCode();
    if (!redirected && op.getRedirect() &&
        code != op.getExpectedHttpResponseCode()) {
      final String redirect = conn.getHeaderField("Location");
      json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
          conn, false);
      disconnect();

      checkRetry = false;
      conn = (HttpURLConnection) new URL(redirect).openConnection();
      connect();
    }

    json = validateResponse(op, conn, false);
    if (json == null && getJsonAndDisconnect) {
      json = jsonParse(conn, false);
    }
  } finally {
    if (getJsonAndDisconnect) {
      disconnect();
    }
  }
}
Project: hops    File: WebHdfsFileSystem.java
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(
      new BufferedOutputStream(conn.getOutputStream(), bufferSize),
      statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
Project: hops    File: WebHdfsFileSystem.java
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json = run(op, f);
  final Map<?, ?> rootmap =
      (Map<?, ?>) json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array =
      (Object[]) rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for (int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>) array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
Project: hops    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final InetSocketAddress nnRpcAddr,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path +
        ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnRpcAddr == null) {
    throw new IllegalArgumentException(
        NamenodeRpcAddressParam.NAME + " is not specified.");
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token =
        new Token<>();
    token.decodeFromUrlString(delegation.getValue());
    SecurityUtil.setTokenService(token, nnRpcAddr);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
Project: hadoop-TCP    File: WebHdfsFileSystem.java
private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
    final HttpURLConnection conn, boolean unwrapException) throws IOException {
  final int code = conn.getResponseCode();
  if (code != op.getExpectedHttpResponseCode()) {
    final Map<?, ?> m;
    try {
      m = jsonParse(conn, true);
    } catch(Exception e) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage(), e);
    }

    if (m == null) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage());
    } else if (m.get(RemoteException.class.getSimpleName()) == null) {
      return m;
    }

    final RemoteException re = JsonUtil.toRemoteException(m);
    throw unwrapException? toIOException(re): re;
  }
  return null;
}
Project: hadoop-TCP    File: WebHdfsFileSystem.java
Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
  List<Param<?,?>> authParams = Lists.newArrayList();    
  // Skip adding delegation token for token operations because these
  // operations require authentication.
  Token<?> token = null;
  if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
    token = getDelegationToken();
  }
  if (token != null) {
    authParams.add(new DelegationParam(token.encodeToUrlString()));
  } else {
    UserGroupInformation userUgi = ugi;
    UserGroupInformation realUgi = userUgi.getRealUser();
    if (realUgi != null) { // proxy user
      authParams.add(new DoAsParam(userUgi.getShortUserName()));
      userUgi = realUgi;
    }
    authParams.add(new UserParam(userUgi.getShortUserName()));
  }
  return authParams.toArray(new Param<?,?>[0]);
}
Project: hadoop-TCP    File: WebHdfsFileSystem.java
void getResponse(boolean getJsonAndDisconnect) throws IOException {
  try {
    connect();
    final int code = conn.getResponseCode();
    if (!redirected && op.getRedirect()
        && code != op.getExpectedHttpResponseCode()) {
      final String redirect = conn.getHeaderField("Location");
      json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
          conn, false);
      disconnect();

      checkRetry = false;
      conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
      connect();
    }

    json = validateResponse(op, conn, false);
    if (json == null && getJsonAndDisconnect) {
      json = jsonParse(conn, false);
    }
  } finally {
    if (getJsonAndDisconnect) {
      disconnect();
    }
  }
}
Project: hadoop-on-lustre2    File: WebHdfsFileSystem.java
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(new BufferedOutputStream(
      conn.getOutputStream(), bufferSize), statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
Project: hadoop-TCP    File: WebHdfsFileSystem.java
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(new BufferedOutputStream(
      conn.getOutputStream(), bufferSize), statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
Project: hadoop-TCP    File: WebHdfsFileSystem.java
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  statistics.incrementWriteOps(1);

  final HttpOpParam.Op op = PutOpParam.Op.CREATE;
  return new Runner(op, f, 
      new PermissionParam(applyUMask(permission)),
      new OverwriteParam(overwrite),
      new BufferSizeParam(bufferSize),
      new ReplicationParam(replication),
      new BlockSizeParam(blockSize))
    .run()
    .write(bufferSize);
}
Project: hadoop-TCP    File: WebHdfsFileSystem.java
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json  = run(op, f);
  final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for(int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>)array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
Project: hadoop-TCP    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final InetSocketAddress nnRpcAddr,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnRpcAddr == null) {
    throw new IllegalArgumentException(NamenodeRpcAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token = 
        new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(delegation.getValue());
    SecurityUtil.setTokenService(token, nnRpcAddr);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
Project: hadoop-on-lustre2    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi,
    final DelegationParam delegation, final String nnId,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }
  if (nnId == null) {
    throw new IllegalArgumentException(NamenodeAddressParam.NAME
        + " is not specified.");
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final Token<DelegationTokenIdentifier> token =
        deserializeToken(delegation.getValue(), nnId);
    ugi.addToken(token);
  }
}
Project: hadoop-on-lustre    File: WebHdfsFileSystem.java
/**
 * Two-step Create/Append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request with data, using the URL from the Location header.
 *
 * The two-step create/append prevents clients from sending data before the
 * redirect. That problem is normally addressed by the "Expect: 100-continue"
 * header in HTTP/1.1 (see RFC 2616, Section 8.2.3), but some software
 * libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP client) do not
 * implement "Expect: 100-continue" correctly, so the two-step create/append
 * is a temporary workaround for those library bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;

  //Step 2) Submit another HTTP request with data, using the URL from the Location header.
  conn = (HttpURLConnection)new URL(redirect).openConnection();
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //use 32 KB chunks
  connect();
  return conn;
}
Project: hadoop-on-lustre    File: WebHdfsFileSystem.java
@Override
public FSDataOutputStream create(final Path f, final FsPermission permission,
    final boolean overwrite, final int bufferSize, final short replication,
    final long blockSize, final Progressable progress) throws IOException {
  statistics.incrementWriteOps(1);

  final HttpOpParam.Op op = PutOpParam.Op.CREATE;
  return new Runner(op, f, 
      new PermissionParam(applyUMask(permission)),
      new OverwriteParam(overwrite),
      new BufferSizeParam(bufferSize),
      new ReplicationParam(replication),
      new BlockSizeParam(blockSize))
    .run()
    .write(bufferSize);
}
Project: hadoop-on-lustre    File: WebHdfsFileSystem.java
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json  = run(op, f);
  final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for(int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>)array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
Project: hadoop-on-lustre2    File: WebHdfsFileSystem.java
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
  statistics.incrementReadOps(1);

  final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
  final Map<?, ?> json  = run(op, f);
  final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
  final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

  //convert FileStatus
  final FileStatus[] statuses = new FileStatus[array.length];
  for(int i = 0; i < array.length; i++) {
    final Map<?, ?> m = (Map<?, ?>)array[i];
    statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
  }
  return statuses;
}
Project: hadoop-on-lustre    File: DatanodeWebHdfsMethods.java
private void init(final UserGroupInformation ugi, final DelegationParam delegation,
    final UriFsPathParam path, final HttpOpParam<?> op,
    final Param<?, ?>... parameters) throws IOException {
  if (LOG.isTraceEnabled()) {
    LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
        + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
  }

  //clear content type
  response.setContentType(null);

  if (UserGroupInformation.isSecurityEnabled()) {
    //add a token for RPC.
    final DataNode datanode = (DataNode)context.getAttribute("datanode");
    final InetSocketAddress nnRpcAddr = NameNode.getAddress(datanode.getConf());
    final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(delegation.getValue());
    SecurityUtil.setTokenService(token, nnRpcAddr);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    ugi.addToken(token);
  }
}
Project: hardfs    File: WebHdfsFileSystem.java
/**
 * Two-step Create/Append:
 * Step 1) Submit an HTTP request with neither auto-redirect nor data.
 * Step 2) Submit another HTTP request with data, using the URL from the Location header.
 *
 * The two-step create/append prevents clients from sending data before the
 * redirect. That problem is normally addressed by the "Expect: 100-continue"
 * header in HTTP/1.1 (see RFC 2616, Section 8.2.3), but some software
 * libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP client) do not
 * implement "Expect: 100-continue" correctly, so the two-step create/append
 * is a temporary workaround for those library bugs.
 */
HttpURLConnection twoStepWrite() throws IOException {
  //Step 1) Submit an HTTP request with neither auto-redirect nor data.
  connect(false);
  validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
  final String redirect = conn.getHeaderField("Location");
  disconnect();
  checkRetry = false;

  //Step 2) Submit another HTTP request with data, using the URL from the Location header.
  conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect));
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setChunkedStreamingMode(32 << 10); //32 KB chunks
  connect();
  return conn;
}
Project: hadoop-on-lustre2    File: WebHdfsFileSystem.java
Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
  List<Param<?,?>> authParams = Lists.newArrayList();    
  // Skip adding delegation token for token operations because these
  // operations require authentication.
  Token<?> token = null;
  if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
    token = getDelegationToken();
  }
  if (token != null) {
    authParams.add(new DelegationParam(token.encodeToUrlString()));
  } else {
    UserGroupInformation userUgi = ugi;
    UserGroupInformation realUgi = userUgi.getRealUser();
    if (realUgi != null) { // proxy user
      authParams.add(new DoAsParam(userUgi.getShortUserName()));
      userUgi = realUgi;
    }
    authParams.add(new UserParam(userUgi.getShortUserName()));
  }
  return authParams.toArray(new Param<?,?>[0]);
}
Project: hardfs    File: WebHdfsFileSystem.java
FSDataOutputStream write(final HttpOpParam.Op op,
    final HttpURLConnection conn, final int bufferSize) throws IOException {
  return new FSDataOutputStream(new BufferedOutputStream(
      conn.getOutputStream(), bufferSize), statistics) {
    @Override
    public void close() throws IOException {
      try {
        super.close();
      } finally {
        try {
          validateResponse(op, conn, true);
        } finally {
          conn.disconnect();
        }
      }
    }
  };
}
Project: hadoop    File: WebHdfsTestUtil.java
public static URL toUrl(final WebHdfsFileSystem webhdfs,
    final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  final URL url = webhdfs.toUrl(op, fspath, parameters);
  WebHdfsTestUtil.LOG.info("url=" + url);
  return url;
}
Project: hadoop    File: TestFSMainOperationsWebHdfs.java
@Test
public void testJsonParseClosesInputStream() throws Exception {
  final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fileSystem;
  Path file = getTestRootPath(fSys, "test/hadoop/file");
  createFile(file);
  final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
  final URL url = webhdfs.toUrl(op, file);
  final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(op.getType().toString());
  conn.connect();

  InputStream myIn = new InputStream(){
    private HttpURLConnection localConn = conn;
    @Override
    public void close() throws IOException {
      closedInputStream = true;
      localConn.getInputStream().close();
    }
    @Override
    public int read() throws IOException {
      return localConn.getInputStream().read();
    }
  };
  final HttpURLConnection spyConn = spy(conn);
  doReturn(myIn).when(spyConn).getInputStream();

  try {
    Assert.assertFalse(closedInputStream);
    WebHdfsFileSystem.jsonParse(spyConn, false);
    Assert.assertTrue(closedInputStream);
  } catch(IOException ioe) {
    junit.framework.TestCase.fail();
  }
  conn.disconnect();
}
Project: aliyun-oss-hadoop-fs    File: WebHdfsTestUtil.java
public static URL toUrl(final WebHdfsFileSystem webhdfs,
    final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  final URL url = webhdfs.toUrl(op, fspath, parameters);
  WebHdfsTestUtil.LOG.info("url=" + url);
  return url;
}
Project: aliyun-oss-hadoop-fs    File: TestFSMainOperationsWebHdfs.java
@Test
public void testJsonParseClosesInputStream() throws Exception {
  final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fileSystem;
  Path file = getTestRootPath(fSys, "test/hadoop/file");
  createFile(file);
  final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
  final URL url = webhdfs.toUrl(op, file);
  final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(op.getType().toString());
  conn.connect();

  InputStream myIn = new InputStream(){
    private HttpURLConnection localConn = conn;
    @Override
    public void close() throws IOException {
      closedInputStream = true;
      localConn.getInputStream().close();
    }
    @Override
    public int read() throws IOException {
      return localConn.getInputStream().read();
    }
  };
  final HttpURLConnection spyConn = spy(conn);
  doReturn(myIn).when(spyConn).getInputStream();

  try {
    Assert.assertFalse(closedInputStream);
    WebHdfsFileSystem.jsonParse(spyConn, false);
    Assert.assertTrue(closedInputStream);
  } catch(IOException ioe) {
    junit.framework.TestCase.fail();
  }
  conn.disconnect();
}
Project: big-c    File: WebHdfsTestUtil.java
public static URL toUrl(final WebHdfsFileSystem webhdfs,
    final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  final URL url = webhdfs.toUrl(op, fspath, parameters);
  WebHdfsTestUtil.LOG.info("url=" + url);
  return url;
}
Project: big-c    File: TestFSMainOperationsWebHdfs.java
@Test
public void testJsonParseClosesInputStream() throws Exception {
  final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fileSystem;
  Path file = getTestRootPath(fSys, "test/hadoop/file");
  createFile(file);
  final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
  final URL url = webhdfs.toUrl(op, file);
  final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(op.getType().toString());
  conn.connect();

  InputStream myIn = new InputStream(){
    private HttpURLConnection localConn = conn;
    @Override
    public void close() throws IOException {
      closedInputStream = true;
      localConn.getInputStream().close();
    }
    @Override
    public int read() throws IOException {
      return localConn.getInputStream().read();
    }
  };
  final HttpURLConnection spyConn = spy(conn);
  doReturn(myIn).when(spyConn).getInputStream();

  try {
    Assert.assertFalse(closedInputStream);
    WebHdfsFileSystem.jsonParse(spyConn, false);
    Assert.assertTrue(closedInputStream);
  } catch(IOException ioe) {
    junit.framework.TestCase.fail();
  }
  conn.disconnect();
}
Project: hadoop-2.6.0-cdh5.4.3    File: WebHdfsTestUtil.java
public static URL toUrl(final WebHdfsFileSystem webhdfs,
    final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  final URL url = webhdfs.toUrl(op, fspath, parameters);
  WebHdfsTestUtil.LOG.info("url=" + url);
  return url;
}
Project: hadoop-plus    File: WebHdfsFileSystem.java
URL toUrl(final HttpOpParam.Op op, final Path fspath,
    final Param<?,?>... parameters) throws IOException {
  //initialize URI path and query
  final String path = PATH_PREFIX
      + (fspath == null? "/": makeQualified(fspath).toUri().getRawPath());
  final String query = op.toQueryString()
      + Param.toSortedString("&", getAuthParameters(op))
      + Param.toSortedString("&", parameters);
  final URL url = getNamenodeURL(path, query);
  if (LOG.isTraceEnabled()) {
    LOG.trace("url=" + url);
  }
  return url;
}
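
Putting the pieces together: PATH_PREFIX is the WebHDFS REST root /webhdfs/v1, the op contributes op=..., and the auth and extra parameters follow in sorted order. A worked example with placeholder host and user:

// For op=LISTSTATUS on /user/alice/data under simple auth, toUrl assembles:
//
//   http://namenode:50070/webhdfs/v1/user/alice/data?op=LISTSTATUS&user.name=alice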