Java 类org.apache.hadoop.io.VersionMismatchException 实例源码

项目:fst-bench    文件:ParseStatus.java   
/**
 * Deserializes this parse status from the stream, dispatching on the
 * leading version byte. Version 1 stored the argument array compressed;
 * version 2 stores it as a plain string array.
 *
 * @throws VersionMismatchException if the version byte is unrecognized
 */
public void readFields(DataInput in) throws IOException {
  final byte wireVersion = in.readByte();
  if (wireVersion == 1) {
    majorCode = in.readByte();
    minorCode = in.readShort();
    args = WritableUtils.readCompressedStringArray(in);
  } else if (wireVersion == 2) {
    majorCode = in.readByte();
    minorCode = in.readShort();
    args = WritableUtils.readStringArray(in);
  } else {
    throw new VersionMismatchException(VERSION, wireVersion);
  }
}
项目:GeoCrawler    文件:ProtocolStatus.java   
/**
 * Deserializes this protocol status, dispatching on the leading version
 * byte. Version 1 payloads carry the argument array in compressed form;
 * the current VERSION carries it uncompressed.
 *
 * @throws VersionMismatchException if the version byte is unrecognized
 */
public void readFields(DataInput in) throws IOException {
  final byte wireVersion = in.readByte();
  if (wireVersion == 1) {
    code = in.readByte();
    lastModified = in.readLong();
    args = WritableUtils.readCompressedStringArray(in);
  } else if (wireVersion == VERSION) {
    code = in.readByte();
    lastModified = in.readLong();
    args = WritableUtils.readStringArray(in);
  } else {
    throw new VersionMismatchException(VERSION, wireVersion);
  }
}
项目:GeoCrawler    文件:ParseStatus.java   
/**
 * Deserializes this parse status, dispatching on the leading version
 * byte. Version 1 stored the argument array compressed; version 2 stores
 * it as a plain string array.
 *
 * @throws VersionMismatchException if the version byte is unrecognized
 */
public void readFields(DataInput in) throws IOException {
  final byte wireVersion = in.readByte();
  if (wireVersion == 1) {
    majorCode = in.readByte();
    minorCode = in.readShort();
    args = WritableUtils.readCompressedStringArray(in);
  } else if (wireVersion == 2) {
    majorCode = in.readByte();
    minorCode = in.readShort();
    args = WritableUtils.readStringArray(in);
  } else {
    throw new VersionMismatchException(VERSION, wireVersion);
  }
}
项目:GeoCrawler    文件:NutchDocument.java   
/**
 * Reads this document from the stream: version byte, field count, then
 * (name, NutchField) pairs, followed by the weight and document metadata.
 *
 * @throws VersionMismatchException if the stored version differs from VERSION
 */
public void readFields(DataInput in) throws IOException {
  fields.clear();
  final byte storedVersion = in.readByte();
  if (storedVersion != VERSION) {
    throw new VersionMismatchException(VERSION, storedVersion);
  }
  final int fieldCount = WritableUtils.readVInt(in);
  for (int idx = 0; idx < fieldCount; idx++) {
    final String fieldName = Text.readString(in);
    final NutchField value = new NutchField();
    value.readFields(in);
    fields.put(fieldName, value);
  }
  weight = in.readFloat();
  documentMeta.readFields(in);
}
项目:anthelion    文件:ProtocolStatus.java   
/**
 * Deserializes this protocol status, dispatching on the leading version
 * byte: version 1 stored the argument array compressed; the current
 * VERSION stores it uncompressed.
 *
 * @throws VersionMismatchException if the version byte is unrecognized
 */
public void readFields(DataInput in) throws IOException {
  byte version = in.readByte();
  switch(version) {
  case 1:
    // Legacy format: args were written with writeCompressedStringArray.
    code = in.readByte();
    lastModified = in.readLong();
    args = WritableUtils.readCompressedStringArray(in);
    break;
  case VERSION:
    // Current format: args are a plain string array.
    code = in.readByte();
    lastModified = in.readLong();
    args = WritableUtils.readStringArray(in);
    break;
  default:
    throw new VersionMismatchException(VERSION, version);
  }
}
项目:anthelion    文件:ParseStatus.java   
/**
 * Deserializes this parse status, dispatching on the leading version
 * byte: version 1 stored the argument array compressed, version 2 stores
 * it uncompressed.
 *
 * @throws VersionMismatchException if the version byte is neither 1 nor 2
 */
public void readFields(DataInput in) throws IOException {
   byte version = in.readByte();
   switch(version) {
   case 1:
     // Legacy format: args were written with writeCompressedStringArray.
     majorCode = in.readByte();
     minorCode = in.readShort();
     args = WritableUtils.readCompressedStringArray(in);
     break;
   case 2:
     // Current format — presumably 2 == VERSION here; confirm against the
     // class constant.
     majorCode = in.readByte();
     minorCode = in.readShort();
     args = WritableUtils.readStringArray(in);
     break;
   default:
     throw new VersionMismatchException(VERSION, version);
   }
}
项目:anthelion    文件:NutchDocument.java   
/**
 * Reads this document from the stream: version byte, field count, then
 * (name, NutchField) pairs, followed by the weight and document metadata.
 *
 * @throws VersionMismatchException if the stored version differs from VERSION
 */
public void readFields(DataInput in) throws IOException {
  // Drop any fields from a previous read before repopulating.
  fields.clear();
  byte version = in.readByte();
  if (version != VERSION) {
    throw new VersionMismatchException(VERSION, version);
  }
  int size = WritableUtils.readVInt(in);
  for (int i = 0; i < size; i++) {
    String name = Text.readString(in);
    NutchField field = new NutchField();
    field.readFields(in);
    fields.put(name, field);
  }
  weight = in.readFloat();
  documentMeta.readFields(in);
}
项目:kafka-connect-hdfs    文件:WALFile.java   
/**
 * Creates a WAL writer backed either by a file path or by a caller-supplied
 * output stream — exactly one of the two must be given via {@code opts}.
 * Block-size, buffer-size and replication options are only valid together
 * with a file. When appending to an existing file, the header is read
 * first to verify the version and recover the sync marker.
 *
 * @throws IllegalArgumentException if neither or both of file/stream are
 *         given, or if file-only options are combined with a stream
 * @throws VersionMismatchException if an existing file's version byte does
 *         not match this writer's VERSION
 */
Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);

  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  // We only own (and are responsible for closing) streams we open ourselves.
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
                     bufferSizeOption.getValue();
    short replication = replicationOption == null ?
                        fs.getDefaultReplication(p) :
                        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
                     blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the file and verify header details
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p), new Reader.OnlyHeaderOption())){
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        // Reuse the existing sync marker so appended records stay readable.
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }

  init(conf, out, ownStream);
}
项目:fst-bench    文件:ParseData.java   
/**
 * Reads this ParseData from the stream: version byte, parse status,
 * title, outlink array, content metadata and (for version > 3) parse
 * metadata.
 *
 * @throws VersionMismatchException if the stored version differs from VERSION
 */
public final void readFields(DataInput in) throws IOException {

        version = in.readByte();
        // incompatible change from UTF8 (version < 5) to Text
        if (version != VERSION)
            throw new VersionMismatchException(VERSION, version);
        // NOTE(review): only version == VERSION passes the check above, so
        // the `version < 3` branch below is unreachable; presumably kept
        // from older code — confirm before removing.

        status = ParseStatus.read(in);
        title = Text.readString(in);                   // read title

        int numOutlinks = in.readInt();    
        outlinks = new Outlink[numOutlinks];
        for (int i = 0; i < numOutlinks; i++) {
            outlinks[i] = Outlink.read(in);
        }

        if (version < 3) {
            int propertyCount = in.readInt();             // read metadata
            contentMeta.clear();
            for (int i = 0; i < propertyCount; i++) {
                contentMeta.add(Text.readString(in), Text.readString(in));
            }
        } else {
            contentMeta.clear();
            contentMeta.readFields(in);
        }
        if (version > 3) {
            parseMeta.clear();
            parseMeta.readFields(in);
        }
    }
项目:GeoCrawler    文件:Content.java   
/**
 * Reads the legacy (compressed-era) Content representation from the
 * stream. Versions 0 and 1 stored metadata as explicit (key, value-list)
 * groups; version 2 delegates metadata parsing to Metadata.readFields.
 *
 * @throws VersionMismatchException if the legacy version byte is not 0, 1 or 2
 */
private final void readFieldsCompressed(DataInput in) throws IOException {
  byte oldVersion = in.readByte();
  switch (oldVersion) {
  case 0:
  case 1:
    url = Text.readString(in); // read url
    base = Text.readString(in); // read base

    content = new byte[in.readInt()]; // read content
    in.readFully(content);

    contentType = Text.readString(in); // read contentType
    // reconstruct metadata
    int keySize = in.readInt();
    String key;
    for (int i = 0; i < keySize; i++) {
      key = Text.readString(in);
      int valueSize = in.readInt();
      for (int j = 0; j < valueSize; j++) {
        metadata.add(key, Text.readString(in));
      }
    }
    break;
  case 2:
    url = Text.readString(in); // read url
    base = Text.readString(in); // read base

    content = new byte[in.readInt()]; // read content
    in.readFully(content);

    contentType = Text.readString(in); // read contentType
    metadata.readFields(in); // read meta data
    break;
  default:
    throw new VersionMismatchException((byte) 2, oldVersion);
  }

}
项目:GeoCrawler    文件:Content.java   
/**
 * Reads this Content from the stream. The first int disambiguates the
 * format: a negative value is a version marker for the current layout,
 * while a non-negative value is the byte length of a deflate-compressed
 * legacy payload, which is inflated and parsed by readFieldsCompressed.
 *
 * @throws VersionMismatchException if a version marker is present but
 *         does not match VERSION
 */
public final void readFields(DataInput in) throws IOException {
  metadata.clear();
  int sizeOrVersion = in.readInt();
  if (sizeOrVersion < 0) { // version
    version = sizeOrVersion;
    switch (version) {
    case VERSION:
      url = Text.readString(in);
      base = Text.readString(in);

      content = new byte[in.readInt()];
      in.readFully(content);

      contentType = Text.readString(in);
      metadata.readFields(in);
      break;
    default:
      throw new VersionMismatchException((byte) VERSION, (byte) version);
    }
  } else { // size
    // Legacy path: inflate the compressed body, then parse the old layout.
    byte[] compressed = new byte[sizeOrVersion];
    in.readFully(compressed, 0, compressed.length);
    ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
    DataInput inflater = new DataInputStream(
        new InflaterInputStream(deflated));
    readFieldsCompressed(inflater);
  }
}
项目:LCIndex-HBase-0.94.16    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:anthelion    文件:Content.java   
/**
 * Reads the legacy (compressed-era) Content representation from the
 * stream. Versions 0 and 1 stored metadata as explicit (key, value-list)
 * groups; version 2 delegates metadata parsing to Metadata.readFields.
 *
 * @throws VersionMismatchException if the legacy version byte is not 0, 1 or 2
 */
private final void readFieldsCompressed(DataInput in) throws IOException {
  byte oldVersion = in.readByte();
  switch (oldVersion) {
  case 0:
  case 1:
    url = Text.readString(in); // read url
    base = Text.readString(in); // read base

    content = new byte[in.readInt()]; // read content
    in.readFully(content);

    contentType = Text.readString(in); // read contentType
    // reconstruct metadata
    int keySize = in.readInt();
    String key;
    for (int i = 0; i < keySize; i++) {
      key = Text.readString(in);
      int valueSize = in.readInt();
      for (int j = 0; j < valueSize; j++) {
        metadata.add(key, Text.readString(in));
      }
    }
    break;
  case 2:
    url = Text.readString(in); // read url
    base = Text.readString(in); // read base

    content = new byte[in.readInt()]; // read content
    in.readFully(content);

    contentType = Text.readString(in); // read contentType
    metadata.readFields(in); // read meta data
    break;
  default:
    throw new VersionMismatchException((byte)2, oldVersion);
  }

}
项目:anthelion    文件:Content.java   
/**
 * Reads this Content from the stream. The first int disambiguates the
 * format: a negative value is a version marker for the current layout,
 * while a non-negative value is the byte length of a deflate-compressed
 * legacy payload, which is inflated and parsed by readFieldsCompressed.
 *
 * @throws VersionMismatchException if a version marker is present but
 *         does not match VERSION
 */
public final void readFields(DataInput in) throws IOException {
  metadata.clear();
  int sizeOrVersion = in.readInt();
  if (sizeOrVersion < 0) { // version
    version = sizeOrVersion;
    switch (version) {
    case VERSION:
      url = Text.readString(in);
      base = Text.readString(in);

      content = new byte[in.readInt()];
      in.readFully(content);

      contentType = Text.readString(in);
      metadata.readFields(in);
      break;
    default:
      throw new VersionMismatchException((byte)VERSION, (byte)version);
    }
  } else { // size
    // Legacy path: inflate the compressed body, then parse the old layout.
    byte[] compressed = new byte[sizeOrVersion];
    in.readFully(compressed, 0, compressed.length);
    ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
    DataInput inflater =
      new DataInputStream(new InflaterInputStream(deflated));
    readFieldsCompressed(inflater);
  }
}
项目:IRIndex    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:RStore    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:HBase-Research    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:hbase-0.94.8-qod    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:hbase-0.94.8-qod    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:hindex    文件:Invocation.java   
/**
 * Deserializes this invocation: versioned header (method name, client
 * version, client methods hash) followed by the parameter list. Falls
 * back to the pre-HBASE-3939 wire format (no version byte) when the
 * version check reports "found v0".
 */
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion.  This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible.  In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name.  Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // The following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF);  // Unsigned byte.
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
    // NOTE(review): if the message does not end in "found v0" the
    // exception is silently swallowed and methodName stays unset —
    // presumably unreachable on a well-formed stream, but confirm.
  }
  // Read parameter count, then each (value, declared class) pair.
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
      this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
项目:kafka-connect-hdfs    文件:WALFile.java   
/**
 * Initialize the {@link Reader}: validates the 3-byte magic and the
 * version byte of the file header, reads the sync marker, records the
 * header end position, and (unless constructing a temporary reader)
 * sets up key/value deserializers.
 *
 * @param tempReader <code>true</code> if we are constructing a temporary and hence do not
 *                   initialize every component; <code>false</code> otherwise.
 * @throws IOException if the magic bytes do not match or a deserializer
 *         cannot be found
 * @throws VersionMismatchException if the file's version byte is newer
 *         than this reader supports
 */
private void init(boolean tempReader) throws IOException {
  byte[] versionBlock = new byte[VERSION.length];
  in.readFully(versionBlock);

  // First three bytes are the file magic; mismatch means not a WAL file.
  if ((versionBlock[0] != VERSION[0]) ||
      (versionBlock[1] != VERSION[1]) ||
      (versionBlock[2] != VERSION[2])) {
    throw new IOException(this + " not a WALFile");
  }

  // Set 'version'
  version = versionBlock[3];
  // Older versions are tolerated; only a newer-than-supported version fails.
  if (version > VERSION[3]) {
    throw new VersionMismatchException(VERSION[3], version);
  }

  in.readFully(sync);                       // read sync bytes
  headerEnd = in.getPos();                  // record end of header

  // Initialize... *not* if this we are constructing a temporary Reader
  if (!tempReader) {
    valBuffer = new DataInputBuffer();
    valIn = valBuffer;

    SerializationFactory serializationFactory =
        new SerializationFactory(conf);
    this.keyDeserializer =
        getDeserializer(serializationFactory, WALEntry.class);
    if (this.keyDeserializer == null) {
      throw new IOException(
          "Could not find a deserializer for the Key class: '"
          + WALFile.class.getCanonicalName() + "'. "
          + "Please ensure that the configuration '" +
          CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
          + "properly configured, if you're using "
          + "custom serialization.");
    }

    this.keyDeserializer.open(valBuffer);

    this.valDeserializer =
        getDeserializer(serializationFactory, WALEntry.class);
    if (this.valDeserializer == null) {
      throw new IOException(
          "Could not find a deserializer for the Value class: '"
          + WALEntry.class.getCanonicalName() + "'. "
          + "Please ensure that the configuration '" +
          CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
          + "properly configured, if you're using "
          + "custom serialization.");
    }
    this.valDeserializer.open(valIn);
  }
}
项目:streamx    文件:WALFile.java   
/**
 * Creates a WAL writer backed either by a file path or by a caller-supplied
 * output stream — exactly one of the two must be given via {@code opts}.
 * Block-size, buffer-size and replication options are only valid together
 * with a file. When appending to an existing file, the header is read
 * first to verify the version and recover the sync marker.
 *
 * @throws IllegalArgumentException if neither or both of file/stream are
 *         given, or if file-only options are combined with a stream
 * @throws VersionMismatchException if an existing file's version byte does
 *         not match this writer's VERSION
 */
Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);

  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption = Options.getOption(
      AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  // We only own (and are responsible for closing) streams we open ourselves.
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs;
    fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
                     bufferSizeOption.getValue();
    short replication = replicationOption == null ?
                        fs.getDefaultReplication(p) :
                        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
                     blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the file and verify header details
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p), new Reader.OnlyHeaderOption())){
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        // Reuse the existing sync marker so appended records stay readable.
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }

  init(conf, out, ownStream);
}
项目:streamx    文件:WALFile.java   
/**
 * Initialize the {@link Reader}: validates the 3-byte magic and the
 * version byte of the file header, reads the sync marker, records the
 * header end position, and (unless constructing a temporary reader)
 * sets up key/value deserializers.
 *
 * @param tempReader <code>true</code> if we are constructing a temporary and hence do not
 *                   initialize every component; <code>false</code> otherwise.
 * @throws IOException if the magic bytes do not match or a deserializer
 *         cannot be found
 * @throws VersionMismatchException if the file's version byte is newer
 *         than this reader supports
 */
private void init(boolean tempReader) throws IOException {
  byte[] versionBlock = new byte[VERSION.length];
  in.readFully(versionBlock);

  // First three bytes are the file magic; mismatch means not a WAL file.
  if ((versionBlock[0] != VERSION[0]) ||
      (versionBlock[1] != VERSION[1]) ||
      (versionBlock[2] != VERSION[2])) {
    throw new IOException(this + " not a WALFile");
  }

  // Set 'version'
  version = versionBlock[3];
  // Older versions are tolerated; only a newer-than-supported version fails.
  if (version > VERSION[3]) {
    throw new VersionMismatchException(VERSION[3], version);
  }

  in.readFully(sync);                       // read sync bytes
  headerEnd = in.getPos();                  // record end of header

  // Initialize... *not* if this we are constructing a temporary Reader
  if (!tempReader) {
    valBuffer = new DataInputBuffer();
    valIn = valBuffer;

    SerializationFactory serializationFactory =
        new SerializationFactory(conf);
    this.keyDeserializer =
        getDeserializer(serializationFactory, WALEntry.class);
    if (this.keyDeserializer == null) {
      throw new IOException(
          "Could not find a deserializer for the Key class: '"
          + WALFile.class.getCanonicalName() + "'. "
          + "Please ensure that the configuration '" +
          CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
          + "properly configured, if you're using "
          + "custom serialization.");
    }

    this.keyDeserializer.open(valBuffer);

    this.valDeserializer =
        getDeserializer(serializationFactory, WALEntry.class);
    if (this.valDeserializer == null) {
      throw new IOException(
          "Could not find a deserializer for the Value class: '"
          + WALEntry.class.getCanonicalName() + "'. "
          + "Please ensure that the configuration '" +
          CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
          + "properly configured, if you're using "
          + "custom serialization.");
    }
    this.valDeserializer.open(valIn);
  }
}
项目:LCIndex-HBase-0.94.16    文件:ClusterStatus.java   
/**
 * Deserializes the cluster status: HBase version string, live servers
 * with their loads, dead servers, regions in transition, cluster id,
 * master coprocessors, and — when the wire version allows — the master
 * and backup-master server names. On a version mismatch from the
 * superclass, the peer's version is recovered from the exception text
 * and used to decide whether the trailing master fields are present.
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException e) {
    /*
     * No API in VersionMismatchException to get the expected and found
     * versions.  We use the only tool available to us: toString(), whose
     * output has a dependency on hadoop-common.  Boo.
     */
    // NOTE(review): parsing the version out of toString() is fragile —
    // it breaks if hadoop-common ever rewords the message; confirm the
    // hadoop version in use still ends the message with "...v<N>".
    int startIndex = e.toString().lastIndexOf('v') + 1;
    version = Integer.parseInt(e.toString().substring(startIndex));
  }
  hbaseVersion = in.readUTF();
  int count = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(count);
  for (int i = 0; i < count; i++) {
    byte [] versionedBytes = Bytes.readByteArray(in);
    HServerLoad hsl = new HServerLoad();
    hsl.readFields(in);
    this.liveServers.put(ServerName.parseVersionedServerName(versionedBytes), hsl);
  }
  count = in.readInt();
  deadServers = new ArrayList<ServerName>(count);
  for (int i = 0; i < count; i++) {
    deadServers.add(ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }
  count = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int i = 0; i < count; i++) {
    String key = in.readUTF();
    RegionState regionState = new RegionState();
    regionState.readFields(in);
    this.intransition.put(key, regionState);
  }
  this.clusterId = in.readUTF();
  int masterCoprocessorsLength = in.readInt();
  masterCoprocessors = new String[masterCoprocessorsLength];
  for(int i = 0; i < masterCoprocessorsLength; i++) {
    masterCoprocessors[i] = in.readUTF();
  }
  // Only read extra fields for master and backup masters if
  // version indicates that we should do so, else use defaults
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(
                    Bytes.readByteArray(in));
    count = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(count);
    for (int i = 0; i < count; i++) {
      this.backupMasters.add(ServerName.parseVersionedServerName(
                               Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
                                 ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
项目:IRIndex    文件:ClusterStatus.java   
/**
 * Deserializes this cluster status from the given stream.
 * <p>
 * Read order must match the corresponding {@code write} implementation:
 * version header, hbase version string, live servers with their loads,
 * dead servers, regions in transition, cluster id, master coprocessors,
 * and (for new-enough versions) the master plus backup masters.
 *
 * @param in stream positioned at a serialized ClusterStatus
 * @throws IOException if the stream is truncated or malformed
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException vme) {
    // VersionMismatchException has no accessor for the version it found,
    // so we scrape it out of toString(); the message format is owned by
    // hadoop-common, which makes this parse fragile.
    String msg = vme.toString();
    version = Integer.parseInt(msg.substring(msg.lastIndexOf('v') + 1));
  }
  this.hbaseVersion = in.readUTF();

  // Live region servers: versioned ServerName bytes followed by load.
  int liveCount = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(liveCount);
  for (int i = 0; i < liveCount; i++) {
    byte[] nameBytes = Bytes.readByteArray(in);
    HServerLoad load = new HServerLoad();
    load.readFields(in);
    this.liveServers.put(ServerName.parseVersionedServerName(nameBytes), load);
  }

  // Dead region servers.
  int deadCount = in.readInt();
  this.deadServers = new ArrayList<ServerName>(deadCount);
  for (int i = 0; i < deadCount; i++) {
    this.deadServers.add(
        ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }

  // Regions in transition, keyed by region name.
  int ritCount = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int i = 0; i < ritCount; i++) {
    String regionKey = in.readUTF();
    RegionState state = new RegionState();
    state.readFields(in);
    this.intransition.put(regionKey, state);
  }

  this.clusterId = in.readUTF();

  // Master coprocessor class names.
  int coprocCount = in.readInt();
  this.masterCoprocessors = new String[coprocCount];
  for (int i = 0; i < coprocCount; i++) {
    this.masterCoprocessors[i] = in.readUTF();
  }

  // Master and backup-master fields were added in a later serialization
  // version; payloads older than that get placeholder defaults instead.
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(Bytes.readByteArray(in));
    int backupCount = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(backupCount);
    for (int i = 0; i < backupCount; i++) {
      this.backupMasters.add(
          ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
        ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
Project: RStore    File: ClusterStatus.java
/**
 * Deserializes this cluster status from the given stream.
 * <p>
 * Read order must match the corresponding {@code write} implementation:
 * version header, hbase version string, live servers with their loads,
 * dead servers, regions in transition, cluster id, master coprocessors,
 * and (for new-enough versions) the master plus backup masters.
 *
 * @param in stream positioned at a serialized ClusterStatus
 * @throws IOException if the stream is truncated or malformed
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException e) {
    /*
     * No API in VersionMismatchException to get the expected and found
     * versions.  We use the only tool available to us: toString(), whose
     * output has a dependency on hadoop-common.  Boo.
     */
    int startIndex = e.toString().lastIndexOf('v') + 1;
    version = Integer.parseInt(e.toString().substring(startIndex));
  }
  hbaseVersion = in.readUTF();
  // Live region servers: versioned ServerName bytes followed by load.
  int count = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(count);
  for (int i = 0; i < count; i++) {
    byte [] versionedBytes = Bytes.readByteArray(in);
    HServerLoad hsl = new HServerLoad();
    hsl.readFields(in);
    this.liveServers.put(ServerName.parseVersionedServerName(versionedBytes), hsl);
  }
  // Dead region servers.
  count = in.readInt();
  deadServers = new ArrayList<ServerName>(count);
  for (int i = 0; i < count; i++) {
    deadServers.add(ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }
  // Regions in transition, keyed by region name.
  count = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int i = 0; i < count; i++) {
    String key = in.readUTF();
    RegionState regionState = new RegionState();
    regionState.readFields(in);
    this.intransition.put(key, regionState);
  }
  this.clusterId = in.readUTF();
  // Master coprocessor class names.
  int masterCoprocessorsLength = in.readInt();
  masterCoprocessors = new String[masterCoprocessorsLength];
  for(int i = 0; i < masterCoprocessorsLength; i++) {
    masterCoprocessors[i] = in.readUTF();
  }
  // Only read extra fields for master and backup masters if
  // version indicates that we should do so, else use defaults
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(
                    Bytes.readByteArray(in));
    count = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(count);
    for (int i = 0; i < count; i++) {
      this.backupMasters.add(ServerName.parseVersionedServerName(
                               Bytes.readByteArray(in)));
    }
  } else {
    // Qualify UNKNOWN_SERVERNAME with its declaring class for consistency
    // with the other constants referenced here (ServerName.NON_STARTCODE).
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
                                 ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
Project: HBase-Research    File: ClusterStatus.java
/**
 * Reads this cluster status back from a stream written by the matching
 * {@code write} method.
 * <p>
 * Field order is fixed by the wire format: version header, hbase version,
 * live servers and loads, dead servers, regions in transition, cluster id,
 * master coprocessors, then (when the version allows) master and backup
 * masters.
 *
 * @param in stream positioned at a serialized ClusterStatus
 * @throws IOException if the stream is truncated or malformed
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException mismatch) {
    // The exception exposes no getter for the version it encountered, so
    // recover it from toString(); that text is formatted by hadoop-common
    // and may change between releases.
    String text = mismatch.toString();
    version = Integer.parseInt(text.substring(text.lastIndexOf('v') + 1));
  }
  this.hbaseVersion = in.readUTF();

  // Live servers: each entry is a versioned ServerName plus its load.
  int numLive = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(numLive);
  for (int idx = 0; idx < numLive; idx++) {
    byte[] serverBytes = Bytes.readByteArray(in);
    HServerLoad serverLoad = new HServerLoad();
    serverLoad.readFields(in);
    this.liveServers.put(
        ServerName.parseVersionedServerName(serverBytes), serverLoad);
  }

  // Dead servers.
  int numDead = in.readInt();
  this.deadServers = new ArrayList<ServerName>(numDead);
  for (int idx = 0; idx < numDead; idx++) {
    this.deadServers.add(
        ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }

  // Regions currently in transition.
  int numTransitions = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int idx = 0; idx < numTransitions; idx++) {
    String regionName = in.readUTF();
    RegionState rs = new RegionState();
    rs.readFields(in);
    this.intransition.put(regionName, rs);
  }

  this.clusterId = in.readUTF();

  // Master coprocessor class names.
  int numCoprocessors = in.readInt();
  this.masterCoprocessors = new String[numCoprocessors];
  for (int idx = 0; idx < numCoprocessors; idx++) {
    this.masterCoprocessors[idx] = in.readUTF();
  }

  // Master/backup-master fields exist only in newer serializations;
  // older payloads fall back to placeholder defaults.
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(Bytes.readByteArray(in));
    int numBackups = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(numBackups);
    for (int idx = 0; idx < numBackups; idx++) {
      this.backupMasters.add(
          ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
        ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
Project: hbase-0.94.8-qod    File: ClusterStatus.java
/**
 * Deserializes this cluster status from {@code in}.
 * <p>
 * The read sequence mirrors the writer exactly: version header, hbase
 * version string, live servers with loads, dead servers, regions in
 * transition, cluster id, master coprocessors, and — if the serialized
 * version is new enough — the master and backup masters.
 *
 * @param in stream positioned at a serialized ClusterStatus
 * @throws IOException if the stream is truncated or malformed
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException vme) {
    // No accessor exists for the found version, so pull it out of the
    // exception's toString(). The string layout belongs to hadoop-common,
    // so this parse is inherently brittle.
    String msg = vme.toString();
    version = Integer.parseInt(msg.substring(msg.lastIndexOf('v') + 1));
  }
  this.hbaseVersion = in.readUTF();

  // Live region servers: versioned ServerName bytes then the load record.
  int liveCount = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(liveCount);
  for (int i = 0; i < liveCount; i++) {
    byte[] nameBytes = Bytes.readByteArray(in);
    HServerLoad load = new HServerLoad();
    load.readFields(in);
    this.liveServers.put(ServerName.parseVersionedServerName(nameBytes), load);
  }

  // Dead region servers.
  int deadCount = in.readInt();
  this.deadServers = new ArrayList<ServerName>(deadCount);
  for (int i = 0; i < deadCount; i++) {
    this.deadServers.add(
        ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }

  // Regions in transition, keyed by region name.
  int ritCount = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int i = 0; i < ritCount; i++) {
    String regionKey = in.readUTF();
    RegionState state = new RegionState();
    state.readFields(in);
    this.intransition.put(regionKey, state);
  }

  this.clusterId = in.readUTF();

  // Master coprocessor class names.
  int coprocCount = in.readInt();
  this.masterCoprocessors = new String[coprocCount];
  for (int i = 0; i < coprocCount; i++) {
    this.masterCoprocessors[i] = in.readUTF();
  }

  // Extra master/backup-master fields appeared in a later version of the
  // wire format; older payloads get placeholder defaults.
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(Bytes.readByteArray(in));
    int backupCount = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(backupCount);
    for (int i = 0; i < backupCount; i++) {
      this.backupMasters.add(
          ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
        ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
Project: hbase-0.94.8-qod    File: ClusterStatus.java
/**
 * Restores this cluster status from a stream produced by the matching
 * {@code write} method.
 * <p>
 * Reads, in wire order: version header, hbase version, live servers with
 * loads, dead servers, regions in transition, cluster id, master
 * coprocessors, and — for sufficiently new versions — master and backup
 * masters.
 *
 * @param in stream positioned at a serialized ClusterStatus
 * @throws IOException if the stream is truncated or malformed
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException mismatch) {
    // VersionMismatchException offers no getter for the found version;
    // parse it from toString(), whose format is controlled by
    // hadoop-common and could change out from under us.
    String text = mismatch.toString();
    version = Integer.parseInt(text.substring(text.lastIndexOf('v') + 1));
  }
  this.hbaseVersion = in.readUTF();

  // Live servers: versioned ServerName followed by that server's load.
  int numLive = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(numLive);
  for (int idx = 0; idx < numLive; idx++) {
    byte[] serverBytes = Bytes.readByteArray(in);
    HServerLoad serverLoad = new HServerLoad();
    serverLoad.readFields(in);
    this.liveServers.put(
        ServerName.parseVersionedServerName(serverBytes), serverLoad);
  }

  // Dead servers.
  int numDead = in.readInt();
  this.deadServers = new ArrayList<ServerName>(numDead);
  for (int idx = 0; idx < numDead; idx++) {
    this.deadServers.add(
        ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }

  // Regions currently in transition.
  int numTransitions = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int idx = 0; idx < numTransitions; idx++) {
    String regionName = in.readUTF();
    RegionState rs = new RegionState();
    rs.readFields(in);
    this.intransition.put(regionName, rs);
  }

  this.clusterId = in.readUTF();

  // Master coprocessor class names.
  int numCoprocessors = in.readInt();
  this.masterCoprocessors = new String[numCoprocessors];
  for (int idx = 0; idx < numCoprocessors; idx++) {
    this.masterCoprocessors[idx] = in.readUTF();
  }

  // Master/backup-master fields are only present in newer serializations;
  // otherwise substitute placeholder defaults.
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(Bytes.readByteArray(in));
    int numBackups = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(numBackups);
    for (int idx = 0; idx < numBackups; idx++) {
      this.backupMasters.add(
          ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
        ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
Project: hindex    File: ClusterStatus.java
/**
 * Deserializes this cluster status from the supplied stream.
 * <p>
 * The field order is dictated by the serialization format and must not be
 * rearranged: version header, hbase version, live servers with loads,
 * dead servers, regions in transition, cluster id, master coprocessors,
 * then (version permitting) master and backup masters.
 *
 * @param in stream positioned at a serialized ClusterStatus
 * @throws IOException if the stream is truncated or malformed
 */
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException vme) {
    // There is no API on VersionMismatchException to retrieve the found
    // version, so we recover it from toString(); that string's format is
    // owned by hadoop-common, making this parse fragile.
    String msg = vme.toString();
    version = Integer.parseInt(msg.substring(msg.lastIndexOf('v') + 1));
  }
  this.hbaseVersion = in.readUTF();

  // Live region servers: versioned ServerName bytes plus a load record.
  int liveCount = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(liveCount);
  for (int i = 0; i < liveCount; i++) {
    byte[] nameBytes = Bytes.readByteArray(in);
    HServerLoad load = new HServerLoad();
    load.readFields(in);
    this.liveServers.put(ServerName.parseVersionedServerName(nameBytes), load);
  }

  // Dead region servers.
  int deadCount = in.readInt();
  this.deadServers = new ArrayList<ServerName>(deadCount);
  for (int i = 0; i < deadCount; i++) {
    this.deadServers.add(
        ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }

  // Regions in transition, keyed by region name.
  int ritCount = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int i = 0; i < ritCount; i++) {
    String regionKey = in.readUTF();
    RegionState state = new RegionState();
    state.readFields(in);
    this.intransition.put(regionKey, state);
  }

  this.clusterId = in.readUTF();

  // Master coprocessor class names.
  int coprocCount = in.readInt();
  this.masterCoprocessors = new String[coprocCount];
  for (int i = 0; i < coprocCount; i++) {
    this.masterCoprocessors[i] = in.readUTF();
  }

  // The master and backup-master fields were introduced in a later
  // serialization version; older payloads receive placeholder defaults.
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(Bytes.readByteArray(in));
    int backupCount = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(backupCount);
    for (int i = 0; i < backupCount; i++) {
      this.backupMasters.add(
          ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
        ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}