Example source code for the Java class org.apache.hadoop.io.serializer.WritableSerialization

Project: aliyun-oss-hadoop-fs    File: TestTotalOrderPartitioner.java
public void testTotalOrderWithCustomSerialization() throws Exception {
  TotalOrderPartitioner<String, NullWritable> partitioner =
      new TotalOrderPartitioner<String, NullWritable>();
  Configuration conf = new Configuration();
  conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
      JavaSerialization.class.getName(),
      WritableSerialization.class.getName());
  conf.setClass(MRJobConfig.KEY_COMPARATOR,
      JavaSerializationComparator.class,
      Comparator.class);
  Path p = TestTotalOrderPartitioner.<String>writePartitionFile(
      "totalordercustomserialization", conf, splitJavaStrings);
  conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, String.class, Object.class);
  try {
    partitioner.setConf(conf);
    NullWritable nw = NullWritable.get();
    for (Check<String> chk : testJavaStrings) {
      assertEquals(chk.data.toString(), chk.part,
          partitioner.getPartition(chk.data, nw, splitJavaStrings.length + 1));
    }
  } finally {
    p.getFileSystem(conf).delete(p, true);
  }
}
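The important detail in this test is the io.serializations setting: Hadoop's SerializationFactory walks the configured Serialization classes in order and picks the first one whose accept() method matches the given class, so a Writable key falls through JavaSerialization to WritableSerialization. Below is a minimal, self-contained sketch of that lookup; the class SerializationFactoryDemo is made up for illustration and is not part of the project above.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.JavaSerialization;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.io.serializer.WritableSerialization;

public class SerializationFactoryDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
        JavaSerialization.class.getName(),
        WritableSerialization.class.getName());

    SerializationFactory factory = new SerializationFactory(conf);

    // Text is a Writable but not Serializable, so the factory skips
    // JavaSerialization and resolves WritableSerialization for it.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Serializer<Text> serializer = factory.getSerializer(Text.class);
    serializer.open(out);
    serializer.serialize(new Text("hello"));
    serializer.close();

    Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
    deserializer.open(new ByteArrayInputStream(out.toByteArray()));
    Text copy = deserializer.deserialize(null); // null => a new Text is created
    deserializer.close();
    System.out.println(copy);                   // prints: hello
  }
}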
Project: ignite    File: HadoopV2TaskContext.java
/**
 * Gets serializer for specified class.
 *
 * @param cls Class.
 * @param jobConf Job configuration.
 * @return Appropriate serializer.
 */
@SuppressWarnings("unchecked")
private HadoopSerialization getSerialization(Class<?> cls, Configuration jobConf) throws IgniteCheckedException {
    A.notNull(cls, "cls");

    SerializationFactory factory = new SerializationFactory(jobConf);

    Serialization<?> serialization = factory.getSerialization(cls);

    if (serialization == null)
        throw new IgniteCheckedException("Failed to find serialization for: " + cls.getName());

    if (serialization.getClass() == WritableSerialization.class)
        return new HadoopWritableSerialization((Class<? extends Writable>)cls);

    return new HadoopSerializationWrapper(serialization, cls);
}
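For the WritableSerialization special case above, here is a minimal standalone sketch of what the native path looks like, independent of the Ignite wrapper classes (WritableSerializationDemo is a made-up name): the serializer just delegates to Writable.write, and the deserializer can reuse a caller-supplied instance instead of allocating a new object per record.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.io.serializer.WritableSerialization;

public class WritableSerializationDemo {
  public static void main(String[] args) throws IOException {
    WritableSerialization serialization = new WritableSerialization();
    serialization.setConf(new Configuration());

    // Serialize: WritableSerialization writes whatever Writable.write() emits.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    Serializer<Writable> serializer = serialization.getSerializer(Writable.class);
    serializer.open(buf);
    serializer.serialize(new LongWritable(42L));
    serializer.close();

    // Deserialize: passing an existing instance lets the deserializer reuse it.
    Deserializer<Writable> deserializer = serialization.getDeserializer(Writable.class);
    deserializer.open(new ByteArrayInputStream(buf.toByteArray()));
    LongWritable value = new LongWritable();
    deserializer.deserialize(value);
    deserializer.close();

    System.out.println(value.get()); // prints: 42
  }
}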
Project: NeverwinterDP-Commons    File: AppWorkerContainer.java
public AppWorkerContainer(AppConfig config) {
  this.config = config ;
  this.appContainerInfoHolder = new AppContainerInfoHolder(config.getAppWorkerContainerId()) ;
  try {
    Configuration rpcConf = new Configuration() ;
    rpcConf.set(
        CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, 
        JavaSerialization.class.getName() + "," + 
            WritableSerialization.class.getName() + "," +
            AvroSerialization.class.getName()
    ) ;

    rpcClient = new RPCClient(config.appHostName, config.appRpcPort) ;
    ipcService = IPCService.newBlockingStub(rpcClient.getRPCChannel()) ;

    Class<AppWorker> appWorkerClass = (Class<AppWorker>) Class.forName(config.worker) ;
    worker = appWorkerClass.newInstance() ;
  } catch(Throwable error) {
    LOGGER.error("Error" , error);
    onDestroy() ;
  }
}
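As an aside, the comma-joined value above can also be built with Configuration.setStrings, which handles the separator for you. A minimal equivalent fragment using the same class names, shown only as an alternative and not taken from the project:

Configuration rpcConf = new Configuration();
rpcConf.setStrings(
    CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
    JavaSerialization.class.getName(),
    WritableSerialization.class.getName(),
    AvroSerialization.class.getName());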
Project: hiped2    File: ProtobufSerialization.java
public static void register(Configuration conf) {
  String[] serializations = conf.getStrings("io.serializations");
  if (ArrayUtils.isEmpty(serializations)) {
    serializations = new String[]{WritableSerialization.class.getName(),
        AvroSpecificSerialization.class.getName(),
        AvroReflectSerialization.class.getName()};
  }
  serializations = (String[]) ArrayUtils.add(serializations, ProtobufSerialization.class.getName());
  conf.setStrings("io.serializations", serializations);
}
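A hedged usage sketch of the helper above: call register(conf) before building the Job so that Protocol Buffers messages can be used as intermediate keys or values. Everything except ProtobufSerialization.register itself is made up for illustration, and the import of ProtobufSerialization is omitted because its package depends on the hiped2 project layout.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class ProtobufRegistrationDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Appends ProtobufSerialization to whatever io.serializations already holds
    // (or to the Writable/Avro defaults if the key is empty).
    ProtobufSerialization.register(conf);

    Job job = Job.getInstance(conf, "protobuf-serialization-example");
    System.out.println(job.getConfiguration().get("io.serializations"));
  }
}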
Project: ignite    File: HadoopSerializationWrapperSelfTest.java
/**
 * Tests read/write of IntWritable via native WritableSerialization.
 *
 * @throws Exception If the test fails.
 */
public void testIntWritableSerialization() throws Exception {
    HadoopSerialization ser = new HadoopSerializationWrapper(new WritableSerialization(), IntWritable.class);

    ByteArrayOutputStream buf = new ByteArrayOutputStream();

    DataOutput out = new DataOutputStream(buf);

    ser.write(out, new IntWritable(3));
    ser.write(out, new IntWritable(-5));

    assertEquals("[0, 0, 0, 3, -1, -1, -1, -5]", Arrays.toString(buf.toByteArray()));

    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));

    assertEquals(3, ((IntWritable)ser.read(in, null)).get());
    assertEquals(-5, ((IntWritable)ser.read(in, null)).get());
}
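The expected byte string in the assertion comes straight from IntWritable.write, which emits a four-byte big-endian int: 3 is [0, 0, 0, 3], and -5 (0xFFFFFFFB in two's complement) prints as [-1, -1, -1, -5] because Java bytes are signed. A minimal sketch reproducing the same bytes without the Ignite wrapper (IntWritableBytesDemo is a made-up name):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.io.IntWritable;

public class IntWritableBytesDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);

    // IntWritable.write() is just DataOutput.writeInt(): 4 bytes, big-endian.
    new IntWritable(3).write(out);
    new IntWritable(-5).write(out);

    System.out.println(Arrays.toString(buf.toByteArray()));
    // prints: [0, 0, 0, 3, -1, -1, -1, -5]
  }
}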
Project: HBase-LOB    File: SweepJob.java
/**
 * Runs a map reduce job to do the sweeping on the mob files.
 * Concurrent runs of the sweep tool on the same column family are mutually exclusive,
 * and an HBase major compaction and the sweep tool are likewise mutually exclusive
 * on the same column family. This synchronization is done via ZooKeeper.
 * So at the beginning of a run, we need to make sure this sweep tool is the only one
 * currently running on this column family, and that no major compaction of this
 * column family is in progress.
 * @param tn The current table name.
 * @param family The descriptor of the current column family.
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 * @throws KeeperException
 */
public void sweep(TableName tn, HColumnDescriptor family) throws IOException, ClassNotFoundException,
    InterruptedException, KeeperException {
  Configuration conf = new Configuration(this.conf);
  // check whether the current user is the same as the owner of the HBase root directory
  String currentUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  FileStatus[] hbaseRootFileStat = fs.listStatus(new Path(conf.get(HConstants.HBASE_DIR)));
  if (hbaseRootFileStat.length > 0) {
    String owner = hbaseRootFileStat[0].getOwner();
    if (!owner.equals(currentUserName)) {
      String errorMsg = "The current user[" + currentUserName + "] doesn't have the privilege."
          + " Please make sure the user is the root of the target HBase";
      LOG.error(errorMsg);
      throw new IOException(errorMsg);
    }
  } else {
    LOG.error("The target HBase doesn't exist");
    throw new IOException("The target HBase doesn't exist");
  }
  String familyName = family.getNameAsString();
  Job job = null;
  try {
    Scan scan = new Scan();
    // Do not retrieve the mob data when scanning
    scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE));
    scan.setFilter(new ReferenceOnlyFilter());
    scan.setCaching(10000);
    scan.setCacheBlocks(false);
    scan.setMaxVersions(family.getMaxVersions());
    conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
        JavaSerialization.class.getName() + "," + WritableSerialization.class.getName());
    job = prepareJob(tn, familyName, scan, conf);
    job.getConfiguration().set(TableInputFormat.SCAN_COLUMN_FAMILY, familyName);
    // Record the compaction start time.
    // In the sweep tool, only mob files whose modification time is older than
    // (startTime - delay) are handled by this tool.
    // The delay is one day by default; it can be configured, but that is only
    // done in tests.
    job.getConfiguration().setLong(MobConstants.MOB_COMPACTION_START_DATE,
        compactionStartTime);

    job.setPartitionerClass(MobFilePathHashPartitioner.class);
    submit(job, tn, familyName);
    if (job.waitForCompletion(true)) {
      // Archive the unused mob files.
      removeUnusedFiles(job, tn, family);
    }
  } finally {
    cleanup(job, tn, familyName);
  }
}