Example source code for the Java class org.apache.hadoop.io.file.tfile.TFile.Writer
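The snippets below are collected from the TFile test suites of several Hadoop forks. As a quick orientation, here is a minimal, self-contained sketch of the basic write path; the output path, block size, codec name, and key/value strings are illustrative placeholders, not taken from any of the projects listed:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile.Writer;

public class TFileWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.tfile");   // illustrative output location
    FileSystem fs = path.getFileSystem(conf);
    FSDataOutputStream out = fs.create(path);
    // Writer(out, minBlockSize, compressName, comparator, conf)
    // compressName is "none", "gz", or "lzo"; comparator "memcmp" sorts keys by raw bytes,
    // while null produces an unsorted TFile (as in unsortedWithSomeCodec below).
    Writer writer = new Writer(out, 64 * 1024, "gz", "memcmp", conf);
    try {
      // With a comparator set, keys must be appended in non-decreasing order.
      writer.append("key-0001".getBytes(), "value-0001".getBytes());
      writer.append("key-0002".getBytes(), "value-0002".getBytes());
    } finally {
      writer.close();
      out.close();
    }
  }
}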

Project: hadoop-oss    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop-oss    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hadoop-oss    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop-oss    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hadoop    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hadoop    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: aliyun-oss-hadoop-fs    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: aliyun-oss-hadoop-fs    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: aliyun-oss-hadoop-fs    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: aliyun-oss-hadoop-fs    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: big-c    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: big-c    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: big-c    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: big-c    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hadoop-EAR    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop-EAR    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hadoop-EAR    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop-EAR    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hadoop-plus    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop-plus    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hadoop-plus    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop-plus    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hops    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hops    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hops    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hops    File: TestTFileByteArrays.java
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hadoop-TCP    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop-TCP    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
Project: hadoop-TCP    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop-on-lustre    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hadoop-on-lustre    File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Project: hadoop-on-lustre    File: TestTFileByteArrays.java
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  }
  catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
Project: hardfs    File: TestTFile.java
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
Project: hardfs    File: TestTFile.java
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}