This article collects typical usage examples of the Java class org.apache.hadoop.io.VersionMismatchException: what the class is for and how it is used in practice. The curated examples below may serve as a useful reference.
VersionMismatchException belongs to the org.apache.hadoop.io package. The examples that follow show its typical uses, ordered roughly by popularity.
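Before the examples, here is a minimal sketch of the pattern they all share (a hypothetical StatusRecord Writable, not code from any of the projects below): the writer emits a version byte before the payload, and the reader compares it against the expected VERSION, throwing VersionMismatchException when they disagree.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.VersionMismatchException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

// Hypothetical example: a Writable that prefixes its payload with a
// version byte and rejects serialized data from unknown versions.
public class StatusRecord implements Writable {
  private static final byte VERSION = 2;

  private byte code;
  private String[] args;

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeByte(VERSION); // version byte goes first on the wire
    out.writeByte(code);
    WritableUtils.writeStringArray(out, args);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    byte version = in.readByte(); // read back the version byte first
    if (version != VERSION) {
      // VersionMismatchException(byte expected, byte found)
      throw new VersionMismatchException(VERSION, version);
    }
    code = in.readByte();
    args = WritableUtils.readStringArray(in);
  }
}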
Example 1: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public void readFields(DataInput in) throws IOException {
  byte version = in.readByte();
  switch (version) {
    case 1:
      majorCode = in.readByte();
      minorCode = in.readShort();
      args = WritableUtils.readCompressedStringArray(in);
      break;
    case 2:
      majorCode = in.readByte();
      minorCode = in.readShort();
      args = WritableUtils.readStringArray(in);
      break;
    default:
      throw new VersionMismatchException(VERSION, version);
  }
}
Example 2: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public void readFields(DataInput in) throws IOException {
  byte version = in.readByte();
  switch (version) {
    case 1:
      code = in.readByte();
      lastModified = in.readLong();
      args = WritableUtils.readCompressedStringArray(in);
      break;
    case VERSION:
      code = in.readByte();
      lastModified = in.readLong();
      args = WritableUtils.readStringArray(in);
      break;
    default:
      throw new VersionMismatchException(VERSION, version);
  }
}
Example 3: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public void readFields(DataInput in) throws IOException {
  fields.clear();
  byte version = in.readByte();
  if (version != VERSION) {
    throw new VersionMismatchException(VERSION, version);
  }
  int size = WritableUtils.readVInt(in);
  for (int i = 0; i < size; i++) {
    String name = Text.readString(in);
    NutchField field = new NutchField();
    field.readFields(in);
    fields.put(name, field);
  }
  weight = in.readFloat();
  documentMeta.readFields(in);
}
Example 4: Writer
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
Writer(Configuration conf, Option... opts) throws IOException {
  BlockSizeOption blockSizeOption =
      Options.getOption(BlockSizeOption.class, opts);
  BufferSizeOption bufferSizeOption =
      Options.getOption(BufferSizeOption.class, opts);
  ReplicationOption replicationOption =
      Options.getOption(ReplicationOption.class, opts);
  FileOption fileOption = Options.getOption(FileOption.class, opts);
  AppendIfExistsOption appendIfExistsOption =
      Options.getOption(AppendIfExistsOption.class, opts);
  StreamOption streamOption = Options.getOption(StreamOption.class, opts);

  // check consistency of options: exactly one of file or stream
  if ((fileOption == null) == (streamOption == null)) {
    throw new IllegalArgumentException("file or stream must be specified");
  }
  if (fileOption == null && (blockSizeOption != null ||
                             bufferSizeOption != null ||
                             replicationOption != null)) {
    throw new IllegalArgumentException("file modifier options not " +
                                       "compatible with stream");
  }

  FSDataOutputStream out;
  boolean ownStream = fileOption != null;
  if (ownStream) {
    Path p = fileOption.getValue();
    FileSystem fs = p.getFileSystem(conf);
    int bufferSize = bufferSizeOption == null ? getBufferSize(conf) :
        bufferSizeOption.getValue();
    short replication = replicationOption == null ?
        fs.getDefaultReplication(p) :
        (short) replicationOption.getValue();
    long blockSize = blockSizeOption == null ? fs.getDefaultBlockSize(p) :
        blockSizeOption.getValue();

    if (appendIfExistsOption != null && appendIfExistsOption.getValue()
        && fs.exists(p)) {
      // Read the existing file and verify its header details
      try (WALFile.Reader reader =
               new WALFile.Reader(conf, WALFile.Reader.file(p),
                                  new Reader.OnlyHeaderOption())) {
        if (reader.getVersion() != VERSION[3]) {
          throw new VersionMismatchException(VERSION[3], reader.getVersion());
        }
        sync = reader.getSync();
      }
      out = fs.append(p, bufferSize);
      this.appendMode = true;
    } else {
      out = fs.create(p, true, bufferSize, replication, blockSize);
    }
  } else {
    out = streamOption.getValue();
  }
  init(conf, out, ownStream);
}
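For orientation, here is a hypothetical way this constructor might be invoked, assuming WALFile exposes SequenceFile-style option factories; the factory names file(...) and appendIfExists(...) are illustrative assumptions, not confirmed API.

// Hypothetical usage sketch; the option factory methods are assumed to
// mirror SequenceFile.Writer's and may not match the real WALFile API.
Writer writer = new Writer(conf,
    WALFile.Writer.file(new Path("/logs/connect/segment-1.wal")),
    WALFile.Writer.appendIfExists(true));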
Example 5: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public final void readFields(DataInput in) throws IOException {
  version = in.readByte();
  // incompatible change from UTF8 (version < 5) to Text
  if (version != VERSION)
    throw new VersionMismatchException(VERSION, version);
  status = ParseStatus.read(in);
  title = Text.readString(in); // read title
  int numOutlinks = in.readInt();
  outlinks = new Outlink[numOutlinks];
  for (int i = 0; i < numOutlinks; i++) {
    outlinks[i] = Outlink.read(in);
  }
  if (version < 3) {
    int propertyCount = in.readInt(); // read metadata
    contentMeta.clear();
    for (int i = 0; i < propertyCount; i++) {
      contentMeta.add(Text.readString(in), Text.readString(in));
    }
  } else {
    contentMeta.clear();
    contentMeta.readFields(in);
  }
  if (version > 3) {
    parseMeta.clear();
    parseMeta.readFields(in);
  }
}
Example 6: readFieldsCompressed
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
private final void readFieldsCompressed(DataInput in) throws IOException {
  byte oldVersion = in.readByte();
  switch (oldVersion) {
    case 0:
    case 1:
      url = Text.readString(in);         // read url
      base = Text.readString(in);        // read base
      content = new byte[in.readInt()];  // read content
      in.readFully(content);
      contentType = Text.readString(in); // read contentType
      // reconstruct metadata
      int keySize = in.readInt();
      String key;
      for (int i = 0; i < keySize; i++) {
        key = Text.readString(in);
        int valueSize = in.readInt();
        for (int j = 0; j < valueSize; j++) {
          metadata.add(key, Text.readString(in));
        }
      }
      break;
    case 2:
      url = Text.readString(in);         // read url
      base = Text.readString(in);        // read base
      content = new byte[in.readInt()];  // read content
      in.readFully(content);
      contentType = Text.readString(in); // read contentType
      metadata.readFields(in);           // read metadata
      break;
    default:
      throw new VersionMismatchException((byte) 2, oldVersion);
  }
}
Example 7: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public final void readFields(DataInput in) throws IOException {
  metadata.clear();
  int sizeOrVersion = in.readInt();
  if (sizeOrVersion < 0) { // version
    version = sizeOrVersion;
    switch (version) {
      case VERSION:
        url = Text.readString(in);
        base = Text.readString(in);
        content = new byte[in.readInt()];
        in.readFully(content);
        contentType = Text.readString(in);
        metadata.readFields(in);
        break;
      default:
        throw new VersionMismatchException((byte) VERSION, (byte) version);
    }
  } else { // size
    byte[] compressed = new byte[sizeOrVersion];
    in.readFully(compressed, 0, compressed.length);
    ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
    DataInput inflater = new DataInputStream(
        new InflaterInputStream(deflated));
    readFieldsCompressed(inflater);
  }
}
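For context, here is a sketch of the write side implied by the branching above (an assumption inferred from the read path, not code from the source, and assuming VERSION is a negative constant): writing the version as a negative int is what lets readFields tell the current format apart from the legacy one, whose stream began with a non-negative compressed-payload size.

public final void write(DataOutput out) throws IOException {
  // Hypothetical counterpart of readFields above: VERSION is negative,
  // so a reader's first readInt() takes the "version" branch; legacy
  // data began with a non-negative size and takes the "size" branch.
  out.writeInt(VERSION);
  Text.writeString(out, url);
  Text.writeString(out, base);
  out.writeInt(content.length);
  out.write(content);
  Text.writeString(out, contentType);
  metadata.write(out);
}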
Example 8: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public void readFields(DataInput in) throws IOException {
  try {
    super.readFields(in);
    methodName = in.readUTF();
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();
  } catch (VersionMismatchException e) {
    // VersionMismatchException doesn't provide an API to access
    // expectedVersion and foundVersion. This is really sad.
    if (e.toString().endsWith("found v0")) {
      // Try to be a bit backwards compatible. In previous versions of
      // HBase (before HBASE-3939 in 0.92) Invocation wasn't a
      // VersionedWritable and thus the first thing on the wire was always
      // the 2-byte length of the method name. Because no method name is
      // longer than 255 characters, and all method names are in ASCII,
      // the following code is equivalent to `in.readUTF()', which we can't
      // call again here, because `super.readFields(in)' already consumed
      // the first byte of input, which can't be "unread" back into `in'.
      final short len = (short) (in.readByte() & 0xFF); // unsigned byte
      final byte[] buf = new byte[len];
      in.readFully(buf, 0, len);
      methodName = new String(buf);
    }
  }
  parameters = new Object[in.readInt()];
  parameterClasses = new Class[parameters.length];
  HbaseObjectWritable objectWritable = new HbaseObjectWritable();
  for (int i = 0; i < parameters.length; i++) {
    parameters[i] = HbaseObjectWritable.readObject(in, objectWritable,
        this.conf);
    parameterClasses[i] = objectWritable.getDeclaredClass();
  }
}
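The "found v0" test works because of the old wire layout: the pre-HBASE-3939 stream began with readUTF's 2-byte big-endian length of the method name, and since no name exceeds 255 ASCII characters the high byte was always 0, which VersionedWritable.readFields consumed and misread as version 0. As a hypothetical illustration, manually reading such a string when both length bytes are still available would look like this:

// Hypothetical illustration: manual equivalent of DataInput.readUTF for
// short ASCII strings -- a 2-byte big-endian length, then the bytes.
// In the catch block above only the low length byte remains, because the
// high byte (always 0) was already consumed as the bogus version.
static String readShortAsciiUtf(DataInput in) throws IOException {
  int len = in.readUnsignedShort(); // both length bytes intact here
  byte[] buf = new byte[len];
  in.readFully(buf);
  return new String(buf, java.nio.charset.StandardCharsets.US_ASCII);
}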
Example 9: init
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
/**
 * Initialize the {@link Reader}.
 *
 * @param tempReader <code>true</code> if we are constructing a temporary reader
 *                   and hence do not initialize every component;
 *                   <code>false</code> otherwise.
 */
private void init(boolean tempReader) throws IOException {
  byte[] versionBlock = new byte[VERSION.length];
  in.readFully(versionBlock);
  if ((versionBlock[0] != VERSION[0]) ||
      (versionBlock[1] != VERSION[1]) ||
      (versionBlock[2] != VERSION[2])) {
    throw new IOException(this + " not a WALFile");
  }

  // Set 'version'
  version = versionBlock[3];
  if (version > VERSION[3]) {
    throw new VersionMismatchException(VERSION[3], version);
  }

  in.readFully(sync);      // read sync bytes
  headerEnd = in.getPos(); // record end of header

  // Initialize... *not* if we are constructing a temporary Reader
  if (!tempReader) {
    valBuffer = new DataInputBuffer();
    valIn = valBuffer;
    SerializationFactory serializationFactory =
        new SerializationFactory(conf);
    this.keyDeserializer =
        getDeserializer(serializationFactory, WALEntry.class);
    if (this.keyDeserializer == null) {
      throw new IOException(
          "Could not find a deserializer for the Key class: '"
          + WALFile.class.getCanonicalName() + "'. "
          + "Please ensure that the configuration '"
          + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
          + "properly configured, if you're using custom serialization.");
    }
    this.keyDeserializer.open(valBuffer);
    this.valDeserializer =
        getDeserializer(serializationFactory, WALEntry.class);
    if (this.valDeserializer == null) {
      throw new IOException(
          "Could not find a deserializer for the Value class: '"
          + WALEntry.class.getCanonicalName() + "'. "
          + "Please ensure that the configuration '"
          + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY + "' is "
          + "properly configured, if you're using custom serialization.");
    }
    this.valDeserializer.open(valIn);
  }
}
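For reference, here is a sketch of the header this reader expects, inferred from the checks above (an assumption, not writer code from the source): three magic bytes and a version byte in VERSION[0..3], followed by the sync marker.

// Hypothetical writer-side counterpart inferred from init() above.
private void writeHeader(FSDataOutputStream out) throws IOException {
  out.write(VERSION); // magic bytes VERSION[0..2] plus version at VERSION[3]
  out.write(sync);    // sync marker; init() records its end as headerEnd
}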
Example 10: readFields
import org.apache.hadoop.io.VersionMismatchException; // import the required package/class
public void readFields(DataInput in) throws IOException {
  int version = getVersion();
  try {
    super.readFields(in);
  } catch (VersionMismatchException e) {
    /*
     * No API in VersionMismatchException to get the expected and found
     * versions. We use the only tool available to us: toString(), whose
     * output has a dependency on hadoop-common. Boo.
     */
    int startIndex = e.toString().lastIndexOf('v') + 1;
    version = Integer.parseInt(e.toString().substring(startIndex));
  }
  hbaseVersion = in.readUTF();
  int count = in.readInt();
  this.liveServers = new HashMap<ServerName, HServerLoad>(count);
  for (int i = 0; i < count; i++) {
    byte[] versionedBytes = Bytes.readByteArray(in);
    HServerLoad hsl = new HServerLoad();
    hsl.readFields(in);
    this.liveServers.put(ServerName.parseVersionedServerName(versionedBytes), hsl);
  }
  count = in.readInt();
  deadServers = new ArrayList<ServerName>(count);
  for (int i = 0; i < count; i++) {
    deadServers.add(ServerName.parseVersionedServerName(Bytes.readByteArray(in)));
  }
  count = in.readInt();
  this.intransition = new TreeMap<String, RegionState>();
  for (int i = 0; i < count; i++) {
    String key = in.readUTF();
    RegionState regionState = new RegionState();
    regionState.readFields(in);
    this.intransition.put(key, regionState);
  }
  this.clusterId = in.readUTF();
  int masterCoprocessorsLength = in.readInt();
  masterCoprocessors = new String[masterCoprocessorsLength];
  for (int i = 0; i < masterCoprocessorsLength; i++) {
    masterCoprocessors[i] = in.readUTF();
  }
  // Only read extra fields for master and backup masters if the
  // version indicates that we should do so; else use defaults.
  if (version >= VERSION_MASTER_BACKUPMASTERS) {
    this.master = ServerName.parseVersionedServerName(
        Bytes.readByteArray(in));
    count = in.readInt();
    this.backupMasters = new ArrayList<ServerName>(count);
    for (int i = 0; i < count; i++) {
      this.backupMasters.add(ServerName.parseVersionedServerName(
          Bytes.readByteArray(in)));
    }
  } else {
    this.master = new ServerName(ServerName.UNKNOWN_SERVERNAME, -1,
        ServerName.NON_STARTCODE);
    this.backupMasters = new ArrayList<ServerName>(0);
  }
}
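Since the exception exposes neither field, this example falls back on parsing toString(). A small helper can mirror that parsing, with the same caveat the comment makes: it depends on hadoop-common's message continuing to end in "found v<N>".

// Hypothetical helper mirroring the parsing above; it inherits the
// fragile dependency on hadoop-common's exception message format.
static int foundVersionOf(VersionMismatchException e) {
  String s = e.toString();
  return Integer.parseInt(s.substring(s.lastIndexOf('v') + 1));
}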