Example source code for the Java class org.apache.hadoop.hdfs.util.AtomicFileOutputStream
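AtomicFileOutputStream writes its bytes to a temporary file next to the destination and only moves that file into place when the stream is closed successfully; calling abort() instead closes and deletes the temporary file, leaving any existing destination file untouched. The pattern shared by the examples below can be reduced to a minimal sketch; the target path and payload here are made up for illustration.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
import org.apache.hadoop.io.IOUtils;

public class AtomicWriteSketch {
  public static void main(String[] args) throws IOException {
    File target = new File("/tmp/seen_txid.example"); // hypothetical target path
    AtomicFileOutputStream fos = new AtomicFileOutputStream(target);
    boolean success = false;
    try {
      // Bytes go to a temporary file; nothing is visible at the target path yet.
      fos.write("12345\n".getBytes(StandardCharsets.UTF_8));
      fos.flush();
      success = true;
    } finally {
      if (success) {
        // close() commits the write by moving the temporary file into place.
        IOUtils.closeStream(fos);
      } else {
        // abort() discards the temporary file after a failed write.
        fos.abort();
      }
    }
  }
}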

Project: hadoop-EAR    File: NNStorage.java
/**
 * Write the last seen transaction ID into a separate file.
 *
 * @param sd storage directory to write the file into
 * @param txid transaction ID to record; -1 is valid when formatting
 * @throws IOException
 */
void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException {
  if (txid < -1) {
    // -1 is valid when formatting
    throw new IOException("Bad txid: " + txid);
  }
  File txIdFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
  OutputStream fos = new AtomicFileOutputStream(txIdFile);
  try {
    fos.write(String.valueOf(txid).getBytes());
    fos.write('\n');
    fos.close();
    fos = null;
  } finally {
    IOUtils.cleanup(LOG, fos);
  }
}
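The file written above contains the decimal transaction ID followed by a newline, so plain text I/O is enough to read it back. A hypothetical reader sketch, not the project's own parsing code:

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;

class SeenTxidReaderSketch {
  // Hypothetical helper: reads back the decimal transaction ID written by
  // writeTransactionIdFile() above.
  static long readTransactionIdFile(File txIdFile) throws IOException {
    BufferedReader reader = new BufferedReader(new FileReader(txIdFile));
    try {
      String line = reader.readLine();
      if (line == null) {
        throw new IOException("Empty transaction ID file: " + txIdFile);
      }
      return Long.parseLong(line.trim());
    } finally {
      reader.close();
    }
  }
}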
Project: hadoop    File: Journal.java
/**
 * Persist to disk the Paxos recovery data for the given segment.
 */
private void persistPaxosData(long segmentTxId,
    PersistedRecoveryPaxosData newData) throws IOException {
  File f = storage.getPaxosFile(segmentTxId);
  boolean success = false;
  AtomicFileOutputStream fos = new AtomicFileOutputStream(f);
  try {
    newData.writeDelimitedTo(fos);
    fos.write('\n');
    // Write human-readable data after the protobuf. This is only
    // to assist in debugging -- it's not parsed at all.
    OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8);

    writer.write(String.valueOf(newData));
    writer.write('\n');
    writer.flush();

    fos.flush();
    success = true;
  } finally {
    if (success) {
      IOUtils.closeStream(fos);
    } else {
      fos.abort();
    }
  }
}
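The file written above starts with a length-delimited protobuf message; the newline and text dump after it exist only for human inspection and are never parsed. A sketch of reading the data back, assuming the standard parseDelimitedFrom method of the generated PersistedRecoveryPaxosData class (the helper itself is hypothetical):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData;
import org.apache.hadoop.io.IOUtils;

class PaxosDataReaderSketch {
  // Hypothetical helper: parses only the delimited protobuf at the head of the
  // file; the trailing human-readable text is ignored.
  static PersistedRecoveryPaxosData readPaxosData(File f) throws IOException {
    InputStream in = new FileInputStream(f);
    try {
      return PersistedRecoveryPaxosData.parseDelimitedFrom(in);
    } finally {
      IOUtils.closeStream(in);
    }
  }
}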
The persistPaxosData example above also appears verbatim in Journal.java of the aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2 projects; only the hadoop-EAR version shown next differs slightly.
Project: hadoop-EAR    File: Journal.java
/**
 * Persist to disk the Paxos recovery data for the given segment.
 */
private void persistPaxosData(long segmentTxId,
    PersistedRecoveryPaxosData newData) throws IOException {
  File f = journalStorage.getPaxosFile(segmentTxId);
  boolean success = false;
  AtomicFileOutputStream fos = new AtomicFileOutputStream(f);
  try {
    newData.writeDelimitedTo(fos);
    fos.write('\n');
    // Write human-readable data after the protobuf. This is only
    // to assist in debugging -- it's not parsed at all.
    OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8);

    writer.write(String.valueOf(newData));
    writer.write('\n');
    writer.flush();

    fos.flush();
    success = true;
  } finally {
    if (success) {
      IOUtils.closeStream(fos);
    } else {
      fos.abort();
    }
  }
}
Project: hadoop-on-lustre    File: FSImage.java
/**
 * Write last checkpoint time into a separate file.
 *
 * @param sd storage directory to write the time file into
 * @throws IOException
 */
void writeCheckpointTime(StorageDirectory sd) throws IOException {
  if (checkpointTime < 0L)
    return; // do not write negative time
  File timeFile = getImageFile(sd, NameNodeFile.TIME);
  DataOutputStream out = new DataOutputStream(
      new AtomicFileOutputStream(timeFile));
  try {
    out.writeLong(checkpointTime);
  } finally {
    out.close();
  }
}
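writeCheckpointTime() stores the timestamp as a single raw long via DataOutputStream, so reading it back only needs DataInputStream.readLong(). A hypothetical reader sketch:

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

class CheckpointTimeReaderSketch {
  // Hypothetical helper: reads back the single long written by
  // writeCheckpointTime() above.
  static long readCheckpointTime(File timeFile) throws IOException {
    DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
    try {
      return in.readLong();
    } finally {
      in.close();
    }
  }
}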
The same writeCheckpointTime example also appears verbatim in the hortonworks-extension fork of FSImage.java.