Example source code for the Java class org.apache.hadoop.io.Writable

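All of the snippets below use the org.apache.hadoop.io.Writable interface. For orientation, here is a minimal sketch of a user-defined Writable (a hypothetical PointWritable, not taken from any of the projects below): the interface only requires write(DataOutput) and readFields(DataInput), plus a no-arg constructor so the framework can create instances reflectively.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

public class PointWritable implements Writable {
  private int x;
  private int y;

  public PointWritable() { }                      // required for reflective instantiation

  public PointWritable(int x, int y) { this.x = x; this.y = y; }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(x);                              // serialize fields in a fixed order
    out.writeInt(y);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    x = in.readInt();                             // read them back in the same order
    y = in.readInt();
  }
}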
Project: hadoop    File: RandomWriter.java
/**
 * Given an output filename, write a bunch of random records to it.
 */
public void map(WritableComparable key, 
                Writable value,
                Context context) throws IOException, InterruptedException {
  int itemCount = 0;
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize + 
      (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
      (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    context.write(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
    context.getCounter(Counters.BYTES_WRITTEN).increment(keyLength + valueLength);
    context.getCounter(Counters.RECORDS_WRITTEN).increment(1);
    if (++itemCount % 200 == 0) {
      context.setStatus("wrote record " + itemCount + ". " + 
                         numBytesToWrite + " bytes left.");
    }
  }
  context.setStatus("done with " + itemCount + " records.");
}
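The fields used above (numBytesToWrite, minKeySize, keySizeRange, minValueSize, valueSizeRange, randomKey, randomValue) are populated from the job configuration before the loop runs. A hedged sketch of what that setup could look like follows; the configuration key names and default values are illustrative, not necessarily the ones RandomWriter actually uses.

@Override
protected void setup(Context context) {
  Configuration conf = context.getConfiguration();
  // Hypothetical keys: total bytes per map task and the key/value size bounds.
  numBytesToWrite = conf.getLong("randomwriter.bytes.per.map", 1L << 30);
  minKeySize      = conf.getInt("randomwriter.min.key", 10);
  keySizeRange    = conf.getInt("randomwriter.max.key", 1000) - minKeySize;
  minValueSize    = conf.getInt("randomwriter.min.value", 0);
  valueSizeRange  = conf.getInt("randomwriter.max.value", 20000) - minValueSize;
  randomKey   = new BytesWritable();
  randomValue = new BytesWritable();
}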
Project: hadoop-oss    File: Server.java
/**
 * Register an RPC kind and the class to deserialize the rpc request.
 * 
 * Called by static initializers of rpcKind Engines
 * @param rpcKind
 * @param rpcRequestWrapperClass - this class is used to deserialize the
 *  rpc request.
 * @param rpcInvoker - used to process the calls on the server side.
 */

public static void registerProtocolEngine(RPC.RpcKind rpcKind, 
        Class<? extends Writable> rpcRequestWrapperClass,
        RpcInvoker rpcInvoker) {
  RpcKindMapValue  old = 
      rpcKindMap.put(rpcKind, new RpcKindMapValue(rpcRequestWrapperClass, rpcInvoker));
  if (old != null) {
    rpcKindMap.put(rpcKind, old);
    throw new IllegalArgumentException("ReRegistration of rpcKind: " +
        rpcKind);      
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("rpcKind=" + rpcKind +
        ", rpcRequestWrapperClass=" + rpcRequestWrapperClass +
        ", rpcInvoker=" + rpcInvoker);
  }
}
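The javadoc above says this method is called from the static initializers of RpcEngine implementations. A hedged sketch of that call pattern follows; MyRequestWrapper and MyRpcInvoker are hypothetical names standing in for an engine's request wrapper Writable and its RpcInvoker.

static {
  Server.registerProtocolEngine(
      RPC.RpcKind.RPC_WRITABLE,       // the RPC kind this engine handles
      MyRequestWrapper.class,         // hypothetical Writable used to deserialize requests
      new MyRpcInvoker());            // hypothetical invoker that processes decoded calls
}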
Project: hadoop    File: Client.java
private Call(RPC.RpcKind rpcKind, Writable param) {
  this.rpcKind = rpcKind;
  this.rpcRequest = param;

  final Integer id = callId.get();
  if (id == null) {
    this.id = nextCallId();
  } else {
    callId.set(null);
    this.id = id;
  }

  final Integer rc = retryCount.get();
  if (rc == null) {
    this.retry = 0;
  } else {
    this.retry = rc;
  }
}
Project: ditb    File: HbaseObjectWritableFor96Migration.java
static Integer getClassCode(final Class<?> c)
throws IOException {
  Integer code = CLASS_TO_CODE.get(c);
  if (code == null ) {
    if (List.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(List.class);
    } else if (Writable.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(Writable.class);
    } else if (c.isArray()) {
      code = CLASS_TO_CODE.get(Array.class);
    } else if (Message.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(Message.class);
    } else if (Serializable.class.isAssignableFrom(c)){
      code = CLASS_TO_CODE.get(Serializable.class);
    } else if (Scan.class.isAssignableFrom(c)) {
      code = CLASS_TO_CODE.get(Scan.class);
    }
  }
  return code;
}
Project: hadoop    File: TestJoinTupleWritable.java
public void testWideTuple2() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values,emptyText);
  values[9] = new Text("Number 9");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(9);

  for (int pos=0; pos<tuple.size();pos++) {
    boolean has = tuple.has(pos);
    if (pos == 9) {
      assertTrue(has);
    }
    else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos,
        has);
    }
  }
}
Project: hadoop    File: TestTupleWritable.java
public void testWideTuple2() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values,emptyText);
  values[9] = new Text("Number 9");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(9);

  for (int pos=0; pos<tuple.size();pos++) {
    boolean has = tuple.has(pos);
    if (pos == 9) {
      assertTrue(has);
    }
    else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
    }
  }
}
Project: hadoop    File: TestTupleWritable.java
private Writable[] makeRandomWritables() {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  return writs;
}
Project: ditb    File: HFileWriterV2.java
/**
 * Add a meta block to the end of the file. Call before close(). Metadata
 * blocks are expensive. Fill one with a bunch of serialized data rather than
 * do a metadata block per metadata instance. If metadata is small, consider
 * adding to file info using {@link #appendFileInfo(byte[], byte[])}
 *
 * @param metaBlockName
 *          name of the block
 * @param content
 *          will call readFields to get data later (DO NOT REUSE)
 */
@Override
public void appendMetaBlock(String metaBlockName, Writable content) {
  byte[] key = Bytes.toBytes(metaBlockName);
  int i;
  for (i = 0; i < metaNames.size(); ++i) {
    // stop when the current key is greater than our own
    byte[] cur = metaNames.get(i);
    if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0,
        key.length) > 0) {
      break;
    }
  }
  metaNames.add(i, key);
  metaData.add(i, content);
}
Project: hadoop    File: TestTupleWritable.java
public void testWideTuple() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values,emptyText);
  values[42] = new Text("Number 42");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(42);

  for (int pos=0; pos<tuple.size();pos++) {
    boolean has = tuple.has(pos);
    if (pos == 42) {
      assertTrue(has);
    }
    else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos, has);
    }
  }
}
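On the read side, a tuple built like the ones in these tests exposes its contents through get() and iteration. A hedged fragment, assuming TupleWritable's iterator visits only positions marked via setWritten():

Writable answer = tuple.get(42);      // the Text("Number 42") value set above
for (Writable w : tuple) {
  System.out.println(w);              // prints only the written entries
}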
Project: hadoop-oss    File: TestIPCServerResponder.java
@Override
public void run() {
  for (int i = 0; i < count; i++) {
    try {
      int byteSize = RANDOM.nextInt(BYTE_COUNT);
      byte[] bytes = new byte[byteSize];
      System.arraycopy(BYTES, 0, bytes, 0, byteSize);
      Writable param = new BytesWritable(bytes);
      call(client, param, address);
      Thread.sleep(RANDOM.nextInt(20));
    } catch (Exception e) {
      LOG.fatal("Caught Exception", e);
      failed = true;
    }
  }
}
Project: kafka-connect-hdfs    File: WALFile.java
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val : The 'value' to be read.
 */
public synchronized void getCurrentValue(Writable val)
    throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }
  // Position stream to 'current' value
  seekToCurrentValue();

  val.readFields(valIn);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
                          + " bytes, should read " +
                          (valBuffer.getLength() - keyLength));
  }
}
Project: kafka-connect-hdfs    File: WALFile.java
/**
 * Read the next key in the file into <code>key</code>, skipping its value.  True if another
 * entry exists, and false at end of file.
 */
public synchronized boolean next(Writable key) throws IOException {
  if (key.getClass() != WALEntry.class) {
    throw new IOException("wrong key class: " + key.getClass().getName()
                          + " is not " + WALEntry.class);
  }

  outBuf.reset();

  keyLength = next(outBuf);
  if (keyLength < 0) {
    return false;
  }

  valBuffer.reset(outBuf.getData(), outBuf.getLength());

  key.readFields(valBuffer);
  valBuffer.mark(0);
  if (valBuffer.getPosition() != keyLength) {
    throw new IOException(key + " read " + valBuffer.getPosition()
                          + " bytes, should read " + keyLength);
  }


  return true;
}
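Together, next(Writable) and getCurrentValue(Writable) support the usual key-then-value read loop. A hedged sketch, assuming a WALFile.Reader exposing these methods and a no-arg WALEntry constructor; process() is a hypothetical callback:

WALEntry key = new WALEntry();
WALEntry value = new WALEntry();
while (reader.next(key)) {            // reads the next key, false at end of file
  reader.getCurrentValue(value);      // reads the value recorded for that key
  process(key, value);
}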
Project: ditb    File: HFileWriterV2.java
private void addBloomFilter(final BloomFilterWriter bfw,
    final BlockType blockType) {
  if (bfw.getKeyCount() <= 0)
    return;

  if (blockType != BlockType.GENERAL_BLOOM_META &&
      blockType != BlockType.DELETE_FAMILY_BLOOM_META) {
    throw new RuntimeException("Block Type: " + blockType.toString() +
        " is not supported");
  }
  additionalLoadOnOpenData.add(new BlockWritable() {
    @Override
    public BlockType getBlockType() {
      return blockType;
    }

    @Override
    public void writeToBlock(DataOutput out) throws IOException {
      bfw.getMetaWriter().write(out);
      Writable dataWriter = bfw.getDataWriter();
      if (dataWriter != null)
        dataWriter.write(out);
    }
  });
}
Project: spark_deep    File: Server.java
/**
 * Setup response for the IPC Call.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status {@link Status} of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream response, 
                           Call call, Status status, 
                           Writable rv, String errorClass, String error) 
throws IOException {
  response.reset();
  DataOutputStream out = new DataOutputStream(response);
  out.writeInt(call.id);                // write call id
  out.writeInt(status.state);           // write status

  if (status == Status.SUCCESS) {
    rv.write(out);
  } else {
    WritableUtils.writeString(out, errorClass);
    WritableUtils.writeString(out, error);
  }
  /*if (call.connection.useWrap) {
    wrapWithSasl(response, call);
  }*/
  call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
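The response layout written above is: call id, status, then either the serialized return value or an error class/message pair. A hedged sketch of the matching client-side decode; responseStream and value are placeholders for illustration:

DataInputStream in = new DataInputStream(responseStream);
int callId = in.readInt();                       // matches out.writeInt(call.id)
int state = in.readInt();                        // matches out.writeInt(status.state)
if (state == Status.SUCCESS.state) {
  value.readFields(in);                          // value: a Writable of the expected type
} else {
  String errorClass = WritableUtils.readString(in);
  String error = WritableUtils.readString(in);
}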
Project: hadoop    File: TestJoinTupleWritable.java
public void testWideWritable() throws Exception {
  Writable[] manyWrits = makeRandomWritables(131);

  TupleWritable sTuple = new TupleWritable(manyWrits);
  for (int i =0; i<manyWrits.length; i++)
  {
    if (i % 3 == 0) {
      sTuple.setWritten(i);
    }
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
  assertEquals("All tuple data has not been read from the stream", 
    -1, in.read());
}
Project: aliyun-maxcompute-data-collectors    File: PGBulkloadExportMapper.java
public void map(LongWritable key, Writable value, Context context)
  throws IOException, InterruptedException {
  try {
    String str = value.toString();
    if (value instanceof Text) {
      writer.write(str, 0, str.length());
      writer.newLine();
    } else if (value instanceof SqoopRecord) {
      writer.write(str, 0, str.length());
    }
  } catch (Exception e) {
    doExecuteUpdate("DROP TABLE " + tmpTableName);
    cleanup(context);
    throw new IOException(e);
  }
}
Project: hadoop    File: TestJoinTupleWritable.java
private TupleWritable makeTuple(Writable[] writs) {
  Writable[] sub1 = { writs[1], writs[2] };
  Writable[] sub3 = { writs[4], writs[5] };
  Writable[] sub2 = { writs[3], new TupleWritable(sub3), writs[6] };
  Writable[] vals = { writs[0], new TupleWritable(sub1),
                      new TupleWritable(sub2), writs[7], writs[8],
                      writs[9] };
  // [v0, [v1, v2], [v3, [v4, v5], v6], v7, v8, v9]
  TupleWritable ret = new TupleWritable(vals);
  for (int i = 0; i < 6; ++i) {
    ret.setWritten(i);
  }
  ((TupleWritable)sub2[1]).setWritten(0);
  ((TupleWritable)sub2[1]).setWritten(1);
  ((TupleWritable)vals[1]).setWritten(0);
  ((TupleWritable)vals[1]).setWritten(1);
  for (int i = 0; i < 3; ++i) {
    ((TupleWritable)vals[2]).setWritten(i);
  }
  return ret;
}
Project: monarch    File: MonarchRecordReaderFTableTest.java
/**
 * Read from Geode, using MonarchRecordReader, all the records from the provided split.
 * The split contains the range of records to be read by the record reader. It
 * returns the total number of records read by this method.
 *
 * @param conf       the reader configuration -- must have the region name
 * @param split      the input-split containing the records to be read
 * @param predicates the predicates to filter out unwanted results
 * @return the total number of records read
 */
private long readUsingRecordReader(final Configuration conf, final InputSplit split,
                                   final Filter... predicates) {
  MonarchRecordReader mrr = new MonarchRecordReader(conf);
  FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  for (int i=0; i<predicates.length; i++) {
    filterList.addFilter(predicates[i]);
  }
  mrr.pushDownfilters = filterList;
  long size = 0;
  try {
    mrr.initialize(split, conf);
    Writable key = mrr.createKey();
    Writable value = mrr.createValue();
    while (mrr.next(key, value)) {
      ++size;
    }
    mrr.close();
  } catch (IOException e) {
    e.printStackTrace();
  }
  return size;
}
Project: hadoop    File: TestJoinTupleWritable.java
public void testWideTuple() throws Exception {
  Text emptyText = new Text("Should be empty");
  Writable[] values = new Writable[64];
  Arrays.fill(values,emptyText);
  values[42] = new Text("Number 42");

  TupleWritable tuple = new TupleWritable(values);
  tuple.setWritten(42);

  for (int pos=0; pos<tuple.size();pos++) {
    boolean has = tuple.has(pos);
    if (pos == 42) {
      assertTrue(has);
    }
    else {
      assertFalse("Tuple position is incorrectly labelled as set: " + pos,
        has);
    }
  }
}
Project: hadoop    File: SortValidator.java
public void configure(JobConf job) {
  // 'key' == sortInput for sort-input; key == sortOutput for sort-output
  key = deduceInputFile(job);

  if (key == sortOutput) {
    partitioner = new HashPartitioner<WritableComparable, Writable>();

    // Figure the 'current' partition and no. of reduces of the 'sort'
    try {
      URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE));
      String inputFile = inputURI.getPath();
      // part file is of the form part-r-xxxxx
      partition = Integer.valueOf(inputFile.substring(
        inputFile.lastIndexOf("part") + 7)).intValue();
      noSortReducers = job.getInt(SORT_REDUCES, -1);
    } catch (Exception e) {
      System.err.println("Caught: " + e);
      System.exit(-1);
    }
  }
}
Project: hadoop    File: CommonStub.java
protected void readObject(Writable obj, DataInputStream inStream) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the specified length to set the length
  // this causes the "obvious" translations to work. So that if you emit
  // a string "abc" from C++, it shows up as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
Project: hadoop    File: UtilsForTests.java
/** The waiting function.  The reduce exits once it gets a signal. Here the
 * signal is the file existence. 
 */
public void reduce(WritableComparable key, Iterator<Writable> val, 
                   OutputCollector<WritableComparable, Writable> output,
                   Reporter reporter)
throws IOException {
  if (fs != null) {
    while (!fs.exists(signal)) {
      try {
        reporter.progress();
        synchronized (this) {
          this.wait(1000); // wait for 1 sec
        }
      } catch (InterruptedException ie) {
        System.out.println("Interrupted while the map was waiting for the"
                           + " signal.");
        break;
      }
    }
  } else {
    throw new IOException("Could not get the DFS!!");
  }
}
Project: oryx2    File: BatchLayer.java
public BatchLayer(Config config) {
  super(config);
  this.keyWritableClass = ClassUtils.loadClass(
      config.getString("oryx.batch.storage.key-writable-class"), Writable.class);
  this.messageWritableClass = ClassUtils.loadClass(
      config.getString("oryx.batch.storage.message-writable-class"), Writable.class);
  this.updateClassName = config.getString("oryx.batch.update-class");
  this.dataDirString = config.getString("oryx.batch.storage.data-dir");
  this.modelDirString = config.getString("oryx.batch.storage.model-dir");
  this.maxDataAgeHours = config.getInt("oryx.batch.storage.max-age-data-hours");
  this.maxModelAgeHours = config.getInt("oryx.batch.storage.max-age-model-hours");
  Preconditions.checkArgument(!dataDirString.isEmpty());
  Preconditions.checkArgument(!modelDirString.isEmpty());
  Preconditions.checkArgument(maxDataAgeHours >= 0 || maxDataAgeHours == NO_MAX_AGE);
  Preconditions.checkArgument(maxModelAgeHours >= 0 || maxModelAgeHours == NO_MAX_AGE);
}
Project: multiple-dimension-spread    File: TestMDSSerde.java
@Test
public void T_deserialize_1() throws SerDeException{
  MDSSerde serde = new MDSSerde();
  Writable a = new Text( "a" );
  Writable b = (Writable)( serde.deserialize( a ) );
  assertEquals( a , b );
}
Project: wherehowsX    File: SequenceFileAnalyzer.java
@Override
public SampleDataRecord getSampleData(Path path) throws IOException {
    SampleDataRecord dataRecord = null;
    if (!fs.exists(path))
        LOG.error("sequence file : " + path.toUri().getPath() + " is not exist on hdfs");
    else {
        try {
            LOG.info("sequencefileanalyzer start parse sampledata for  file path : {}", path.toUri().getPath());
            SequenceFile.Reader reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(path));
            List<Object> sampleValues = new ArrayList<Object>();
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
            int count = 0;
            String keyName = "Key";
            String valueName = "Value";
            while (reader.next(key, value) && count < 12) {
                sampleValues.add("{\"" + keyName + "\": \"" + key + "\", \"" + valueName + "\": \"" + value + "\"}");
                count++;
            }
            dataRecord = new SampleDataRecord(path.toUri().getPath(), sampleValues);
            LOG.info("sequence file path : {}, sample data is {}", path.toUri().getPath(), sampleValues);
        } catch (Exception e) {
            LOG.error("path : {} content " + " is not Sequence File format content  ",path.toUri().getPath());
            LOG.info(e.getStackTrace().toString());
        }
    }
    return dataRecord;

}
Project: hadoop    File: TypedBytesWritableOutput.java
public void writeMap(MapWritable mw) throws IOException {
  out.writeMapHeader(mw.size());
  for (Map.Entry<Writable, Writable> entry : mw.entrySet()) {
    write(entry.getKey());
    write(entry.getValue());
  }
}
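MapWritable implements Map<Writable, Writable>, so a caller can populate one with put() and hand it to writeMap(). A hedged usage sketch; typedBytesOut stands in for the TypedBytesWritableOutput instance:

MapWritable mw = new MapWritable();
mw.put(new Text("count"), new IntWritable(42));
mw.put(new Text("name"), new Text("dingo"));
typedBytesOut.writeMap(mw);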
Project: oryx2    File: WritableToValueFunctionTest.java
@Test
public void testFunction() {
  WritableToValueFunction<String,String> function =
      new WritableToValueFunction<>(String.class, String.class, Text.class, Text.class);
  Tuple2<Writable,Writable> in = new Tuple2<>(new Text("bizz"), new Text("buzz"));
  Tuple2<String,String> out = function.call(in);
  assertEquals("bizz", out._1());
  assertEquals("buzz", out._2());
}
Project: hadoop-oss    File: Server.java
public Class<? extends Writable> getRpcRequestWrapper(
    RpcKindProto rpcKind) {
  if (rpcRequestClass != null)
     return rpcRequestClass;
  RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind));
  return (val == null) ? null : val.rpcRequestWrapperClass; 
}
Project: hadoop    File: CompositeRecordReader.java
/**
 * Create a value to be used internally for joins.
 */
protected TupleWritable createTupleWritable() {
  Writable[] vals = new Writable[kids.length];
  for (int i = 0; i < vals.length; ++i) {
    vals[i] = kids[i].createValue();
  }
  return new TupleWritable(vals);
}
Project: hadoop    File: Server.java
protected Server(String bindAddress, int port,
    Class<? extends Writable> rpcRequestClass, int handlerCount,
    int numReaders, int queueSizePerHandler, Configuration conf,
    String serverName, SecretManager<? extends TokenIdentifier> secretManager)
  throws IOException {
  this(bindAddress, port, rpcRequestClass, handlerCount, numReaders, 
      queueSizePerHandler, conf, serverName, secretManager, null);
}
Project: hadoop    File: DumpTypedBytes.java
/**
 * Dump given list of files to standard output as typed bytes.
 */
@SuppressWarnings("unchecked")
private int dumpTypedBytes(List<FileStatus> files) throws IOException {
  JobConf job = new JobConf(getConf()); 
  DataOutputStream dout = new DataOutputStream(System.out);
  AutoInputFormat autoInputFormat = new AutoInputFormat();
  for (FileStatus fileStatus : files) {
    FileSplit split = new FileSplit(fileStatus.getPath(), 0,
      fileStatus.getLen() * fileStatus.getBlockSize(),
      (String[]) null);
    RecordReader recReader = null;
    try {
      recReader = autoInputFormat.getRecordReader(split, job, Reporter.NULL);
      Object key = recReader.createKey();
      Object value = recReader.createValue();
      while (recReader.next(key, value)) {
        if (key instanceof Writable) {
          TypedBytesWritableOutput.get(dout).write((Writable) key);
        } else {
          TypedBytesOutput.get(dout).write(key);
        }
        if (value instanceof Writable) {
          TypedBytesWritableOutput.get(dout).write((Writable) value);
        } else {
          TypedBytesOutput.get(dout).write(value);
        }
      }
    } finally {
      if (recReader != null) {
        recReader.close();
      }
    }
  }
  dout.flush();
  return 0;
}
Project: hadoop-oss    File: Client.java
/** Construct an IPC client whose values are of the given {@link Writable}
 * class. */
public Client(Class<? extends Writable> valueClass, Configuration conf, 
    SocketFactory factory) {
  this.valueClass = valueClass;
  this.conf = conf;
  this.socketFactory = factory;
  this.connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
  this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  this.clientId = ClientId.getClientId();
  this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
}
Project: hadoop    File: TestComparators.java
public void reduce(IntWritable key, Iterator<Writable> values, 
                   OutputCollector<IntWritable, Text> out,
                   Reporter reporter) throws IOException {
  int currentKey = key.get();
  // keys should be in descending order
  if (currentKey > lastKey) {
    fail("Keys not in sorted descending order");
  }
  lastKey = currentKey;
  out.collect(key, new Text("success"));
}
Project: ditb    File: Writables.java
/**
 * Copy one Writable to another.  Copies bytes using data streams.
 * @param bytes Source Writable
 * @param tgt Target Writable
 * @return The target Writable.
 * @throws IOException e
 */
public static Writable copyWritable(final byte [] bytes, final Writable tgt)
throws IOException {
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
  try {
    tgt.readFields(dis);
  } finally {
    dis.close();
  }
  return tgt;
}
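To use copyWritable(byte[], Writable), the caller first needs the serialized form of the source Writable. A hedged round-trip sketch using plain java.io streams; src stands in for any existing Writable (here a Text):

ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
src.write(dos);                                   // Writable.write produces the raw bytes
dos.flush();
Text copy = (Text) Writables.copyWritable(baos.toByteArray(), new Text());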
Project: hadoop    File: CompositeRecordReader.java
/**
 * Close all child RRs.
 */
public void close() throws IOException {
  if (kids != null) {
    for (RecordReader<K,? extends Writable> rr : kids) {
      rr.close();
    }
  }
  if (jc != null) {
    jc.close();
  }
}
Project: hadoop-oss    File: TestIPCServerResponder.java
static Writable call(Client client, Writable param,
    InetSocketAddress address) throws IOException {
  final ConnectionId remoteId = ConnectionId.getConnectionId(address, null,
      null, 0, null, conf);
  return client.call(RpcKind.RPC_BUILTIN, param, remoteId,
      RPC.RPC_SERVICE_CLASS_DEFAULT, null);
}
Project: hadoop-oss    File: TestIPCServerResponder.java
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param,
    long receiveTime) throws IOException {
  if (sleep) {
    try {
      Thread.sleep(RANDOM.nextInt(20)); // sleep a bit
    } catch (InterruptedException e) {}
  }
  return param;
}
Project: hadoop    File: Server.java
protected Server(String bindAddress, int port,
                Class<? extends Writable> paramClass, int handlerCount, 
                Configuration conf)
  throws IOException 
{
  this(bindAddress, port, paramClass, handlerCount, -1, -1, conf, Integer
      .toString(port), null, null);
}
Project: hadoop-oss    File: TestSerializationFactory.java
@Test
public void testGetDeserializer() {
  // Test that a valid serializer class is returned when its present
  assertNotNull("A valid class must be returned for default Writable SerDe",
      factory.getDeserializer(Writable.class));
  // Test that a null is returned when none can be found.
  assertNull("A null should be returned if there are no deserializers found",
      factory.getDeserializer(TestSerializationFactory.class));
}
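For context, the factory under test is built from a Configuration, and the Serializer/Deserializer pair it returns for Writable types can round-trip a value through ordinary streams. A hedged sketch:

SerializationFactory factory = new SerializationFactory(new Configuration());
Serializer<Text> ser = factory.getSerializer(Text.class);
Deserializer<Text> deser = factory.getDeserializer(Text.class);

ByteArrayOutputStream out = new ByteArrayOutputStream();
ser.open(out);
ser.serialize(new Text("hello"));
ser.close();

deser.open(new ByteArrayInputStream(out.toByteArray()));
Text roundTripped = deser.deserialize(null);      // passing null asks for a fresh instance
deser.close();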
Project: kafka-connect-hdfs    File: WALFile.java
/**
 * Read the next key/value pair in the file into <code>key</code> and <code>val</code>.  Returns
 * true if such a pair exists and false when at end of file
 */
public synchronized boolean next(Writable key, Writable val)
    throws IOException {
  if (val.getClass() != WALEntry.class) {
    throw new IOException("wrong value class: " + val + " is not " + WALEntry.class);
  }

  boolean more = next(key);

  if (more) {
    getCurrentValue(val);
  }

  return more;
}