本文整理汇总了Java中org.apache.hadoop.conf.Configurable类的典型用法代码示例。如果您正苦于以下问题:Java Configurable类的具体用法?Java Configurable怎么用?Java Configurable使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Configurable类属于org.apache.hadoop.conf包,在下文中一共展示了Configurable类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testBarWritable
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
@Test
public void testBarWritable() throws Exception {
System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
// Wrap a Configurable Writable inside the generic container.
FooGenericWritable wrapper = new FooGenericWritable();
wrapper.setConf(conf);
Bar payload = new Bar();
payload.setConf(conf);
wrapper.set(payload);
// Round-trip through serialization and capture the deserialized copy.
FooGenericWritable roundTripped =
    (FooGenericWritable) TestWritable.testWritable(wrapper, conf);
// The configuration must have been propagated into the wrapped instance.
System.out.println("Testing if Configuration is passed to wrapped classes");
assertTrue(roundTripped.get() instanceof Configurable);
assertNotNull(((Configurable) roundTripped.get()).getConf());
}
示例2: getCurrentValue
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * <p>If the value implements {@link Configurable}, the reader's configuration
 * is injected before deserialization so the value can configure itself.
 *
 * @param val : The 'value' to be read.
 * @throws IOException if the serialized value's length does not match what
 *         {@code val.readFields} consumed (i.e. trailing bytes remain)
 */
public synchronized void getCurrentValue(Writable val)
throws IOException {
if (val instanceof Configurable) {
((Configurable) val).setConf(this.conf);
}
// Position stream to 'current' value
seekToCurrentValue();
val.readFields(valIn);
// A successful read() here means readFields left bytes behind, so the
// value record was shorter than expected — surface that as corruption.
if (valIn.read() > 0) {
log.info("available bytes: " + valIn.available());
throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
+ " bytes, should read " +
(valBuffer.getLength() - keyLength));
}
}
示例3: testBarWritable
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
public void testBarWritable() throws Exception {
System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
// Build the generic wrapper around a Configurable payload.
FooGenericWritable container = new FooGenericWritable();
container.setConf(conf);
Bar inner = new Bar();
inner.setConf(conf);
container.set(inner);
// Serialize and deserialize via the shared test helper.
FooGenericWritable restored =
    (FooGenericWritable) TestWritable.testWritable(container, conf);
// Verify the configuration reached the deserialized wrapped object.
System.out.println("Testing if Configuration is passed to wrapped classes");
assertTrue(restored.get() instanceof Configurable);
assertNotNull(((Configurable) restored.get()).getConf());
}
示例4: getCodec
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Returns the CompressionCodec for the message, resolving the class named in
 * the header and caching instances per class name.
 *
 * @param header carries the codec class name to instantiate
 * @return the cached or freshly created codec
 * @throws InstantiationException if the codec class cannot be instantiated
 * @throws IllegalAccessException if the codec constructor is inaccessible
 * @throws ClassNotFoundException if the named class cannot be loaded
 */
private final CompressionCodec getCodec(Header header)
throws InstantiationException, IllegalAccessException,
ClassNotFoundException {
String className = header.getCodecClassName();
CompressionCodec cached = codecMap.get(className);
if (cached != null) {
return cached;
}
// Deliberately unsynchronised: occasionally building a duplicate codec is
// cheaper than taking a lock on every lookup.
ClassLoader loader = Thread.currentThread().getContextClassLoader();
CompressionCodec created =
    (CompressionCodec) loader.loadClass(className).newInstance();
if (created instanceof Configurable) {
((Configurable) created).setConf(hadoopConf);
}
codecMap.put(className, created);
return created;
}
示例5: readObject
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Custom Java-serialization hook: restores the wrapped Hadoop InputSplit.
 * Order matters — default fields first, then the JobConf (which deserializes
 * itself via readFields), then the split instance, configured before its own
 * readFields so it can use the configuration during deserialization.
 */
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
// read the parent fields and the final fields
in.defaultReadObject();
// the job conf knows how to deserialize itself
jobConf = new JobConf();
jobConf.readFields(in);
try {
// splitType was restored by defaultReadObject above.
hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
}
catch (Exception e) {
throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
}
// Hand the split its configuration through whichever contract it supports;
// Configurable takes precedence over the older JobConfigurable interface.
if (hadoopInputSplit instanceof Configurable) {
((Configurable) hadoopInputSplit).setConf(this.jobConf);
}
else if (hadoopInputSplit instanceof JobConfigurable) {
((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
}
hadoopInputSplit.readFields(in);
}
示例6: FallbackNameNodeAddress
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Builds the address holders for a fallback namenode's protocols, all keyed
 * off the parent's configuration.
 */
public FallbackNameNodeAddress(InstanceId id, String nameService,
Configurable parent) {
super(id, nameService, parent);
clientProtocol = new ConfigAddressHolder(DFS_NAMENODE_RPC_ADDRESS_KEY,
parent);
// The avatar protocol address is derived lazily: same host as the client
// protocol, with the port taken from AVATARNODE_PORT or, if unset,
// defaulting to the client port + 1.
avatarProtocol = new AddressHolder<InetSocketAddress>(parent) {
@Override
public String getKey() {
return AVATARNODE_PORT;
}
@Override
public InetSocketAddress getAddress() throws UnknownHostException {
InetSocketAddress clientProtocolAddress = clientProtocol.getAddress();
return new InetSocketAddress(clientProtocolAddress.getAddress(),
getConf().getInt(AVATARNODE_PORT,
clientProtocolAddress.getPort() + 1));
}
};
httpProtocol = new ConfigAddressHolder(DFS_NAMENODE_HTTP_ADDRESS_KEY, parent);
datanodeProtocol = new ConfigAddressHolder(DATANODE_PROTOCOL_ADDRESS,
parent);
}
示例7: AvatarNameSpaceAddressManager
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
public AvatarNameSpaceAddressManager(Configurable confg, String serviceName) {
super(confg);
znodePath = confg.getConf().get(DFS_CLUSTER_NAME) + "/" + serviceName;
nodeZero = new AvatarNodeAddress(InstanceId.NODEZERO, serviceName, confg);
nodeOne = new AvatarNodeAddress(InstanceId.NODEONE, serviceName, confg);
try {
// We would like to update the primary and standby references at construction
// but
// not throw an exception here.
refreshZookeeperInfo();
} catch (IOException ioe) {
LOG.error(ioe);
} catch (KeeperException kpe) {
LOG.error(kpe);
} catch (InterruptedException ie) {
LOG.error(ie);
}
}
示例8: createEncryptionMaterialsProvider
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Instantiates the S3 encryption materials provider named in the Hadoop
 * configuration, or returns {@code null} when none is configured. A provider
 * that also implements {@link Configurable} receives the configuration.
 */
private static EncryptionMaterialsProvider createEncryptionMaterialsProvider(Configuration hadoopConfig)
{
    String empClassName = hadoopConfig.get(S3_ENCRYPTION_MATERIALS_PROVIDER);
    if (empClassName == null) {
        return null;
    }
    try {
        Object candidate = Class.forName(empClassName).getConstructor().newInstance();
        if (!(candidate instanceof EncryptionMaterialsProvider)) {
            throw new RuntimeException("Invalid encryption materials provider class: " + candidate.getClass().getName());
        }
        EncryptionMaterialsProvider provider = (EncryptionMaterialsProvider) candidate;
        if (provider instanceof Configurable) {
            ((Configurable) provider).setConf(hadoopConfig);
        }
        return provider;
    }
    catch (ReflectiveOperationException e) {
        throw new RuntimeException("Unable to load or create S3 encryption materials provider: " + empClassName, e);
    }
}
示例9: getHadoopConfiguration
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Resolves the active Hadoop {@link Configuration}: first from the Asakusa
 * resource configuration (when it is {@link Configurable} and carries one),
 * then from a directly registered Configuration resource.
 *
 * @throws IllegalStateException if neither source has been prepared
 */
private static Configuration getHadoopConfiguration() {
    ResourceConfiguration resource = ResourceBroker.find(ResourceConfiguration.class);
    if (resource instanceof Configurable) {
        Configuration fromResource = ((Configurable) resource).getConf();
        if (fromResource != null) {
            return fromResource;
        }
    }
    Configuration registered = ResourceBroker.find(Configuration.class);
    if (registered != null) {
        return registered;
    }
    throw new IllegalStateException(MessageFormat.format(
            "required resource has not been prepared yet: {0}",
            Configuration.class));
}
示例10: createNewRecordReader
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Create new record reader from the original InputFormat and initialize it
 * for the file at {@code currentFileIndex} within the combined split.
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void createNewRecordReader() throws IOException,
InterruptedException
{
// Carve the current file's region out of the combined split.
FileSplit split = new FileSplit(
    combineFileSplit.getPath(currentFileIndex),
    combineFileSplit.getOffset(currentFileIndex),
    combineFileSplit.getLength(currentFileIndex),
    null);
// Pass the job configuration along if the split accepts one.
if (split instanceof Configurable)
{
((Configurable) split).setConf(context.getConfiguration());
}
current = inputFormat.createRecordReader(split, context);
current.initialize(split, context);
}
示例11: encodeCellsTo
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Encodes all cells from the scanner onto the stream, optionally compressing
 * the output. The compressor is borrowed from the shared CodecPool and always
 * returned; the (possibly wrapped) stream is always closed.
 */
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec,
CompressionCodec compressor) throws IOException {
Compressor poolCompressor = null;
try {
if (compressor != null) {
// Configure the codec before asking it to wrap the stream.
if (compressor instanceof Configurable) {
((Configurable) compressor).setConf(this.conf);
}
poolCompressor = CodecPool.getCompressor(compressor);
// From here on, writes go through the compression stream.
os = compressor.createOutputStream(os, poolCompressor);
}
Codec.Encoder encoder = codec.getEncoder(os);
while (cellScanner.advance()) {
encoder.write(cellScanner.current());
}
encoder.flush();
} catch (BufferOverflowException | IndexOutOfBoundsException e) {
// Buffer-capacity failures are not transient; tell the caller not to retry.
throw new DoNotRetryIOException(e);
} finally {
// Close the (wrapped) stream first so compressed data is flushed before
// the pooled compressor is handed back for reuse.
os.close();
if (poolCompressor != null) {
CodecPool.returnCompressor(poolCompressor);
}
}
}
示例12: createCompressionStream
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Builds a compressed, buffered output stream over {@code downStream}.
 *
 * @param downStream destination stream
 * @param compressor compressor instance handed to the codec
 * @param downStreamBufferSize if > 0, the downstream is additionally buffered
 *        with this size before compression
 * @return a buffered stream that flushes by finishing the compression stream
 */
public OutputStream createCompressionStream(
OutputStream downStream, Compressor compressor, int downStreamBufferSize)
throws IOException {
CompressionCodec codec = getCodec(conf);
OutputStream bos1 = null;
if (downStreamBufferSize > 0) {
bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
}
else {
bos1 = downStream;
}
// NOTE(review): unchecked cast — assumes the codec implements Configurable
// (would throw ClassCastException otherwise), and mutates the codec's own
// Configuration, which may be shared with other users of this codec.
((Configurable)codec).getConf().setInt("io.file.buffer.size", 32 * 1024);
CompressionOutputStream cos =
codec.createOutputStream(bos1, compressor);
// FinishOnFlushCompressionStream presumably finishes the compression block
// on flush() — TODO confirm against its implementation.
BufferedOutputStream bos2 =
new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
DATA_OBUF_SIZE);
return bos2;
}
示例13: read
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
@Override
public void read(DataInputView in) throws IOException {
// Fixed-layout prefix: split number, then the split's class name.
this.splitNumber = in.readInt();
this.hadoopInputSplitTypeName = in.readUTF();
// Lazily instantiate the wrapped split the first time we deserialize.
if (hadoopInputSplit == null) {
try {
Class<? extends org.apache.hadoop.io.Writable> splitClass =
Class.forName(hadoopInputSplitTypeName).asSubclass(org.apache.hadoop.io.Writable.class);
this.hadoopInputSplit =
(org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitClass);
}
catch (Exception e) {
throw new RuntimeException("Unable to create InputSplit", e);
}
}
// The JobConf deserializes itself, and must be handed to the split before
// the split reads its own fields.
jobConf = new JobConf();
jobConf.readFields(in);
if (this.hadoopInputSplit instanceof Configurable) {
((Configurable) this.hadoopInputSplit).setConf(this.jobConf);
}
this.hadoopInputSplit.readFields(in);
}
示例14: getConf
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
@Override
public Configuration getConf() {
// Delegate to the wrapped expression when it carries its own configuration.
return (expression instanceof Configurable)
? ((Configurable) expression).getConf()
: null;
}
示例15: setConf
import org.apache.hadoop.conf.Configurable; //导入依赖的package包/类
/**
 * Check and set 'configuration' if necessary. No-op when {@code conf} is
 * null; otherwise configures the object via {@link Configurable#setConf}
 * when supported, and always forwards to {@code setJobConf}.
 *
 * @param theObject object for which to set configuration
 * @param conf Configuration
 */
public static void setConf(Object theObject, Configuration conf) {
if (conf == null) {
return;
}
if (theObject instanceof Configurable) {
((Configurable) theObject).setConf(conf);
}
setJobConf(theObject, conf);
}