

Java ReflectionUtils.newInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.ReflectionUtils.newInstance. If you are wondering what ReflectionUtils.newInstance does, how to call it, or what real-world uses look like, the curated examples here should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.ReflectionUtils.


The following presents 15 code examples of the ReflectionUtils.newInstance method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
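Before diving into the examples, here is a minimal, self-contained usage sketch (not drawn from any of the projects below). The class name NewInstanceSketch and the configuration key "example.value.class" are hypothetical placeholders for illustration only; the sketch shows the common pattern of resolving a class from a Configuration and instantiating it with ReflectionUtils.newInstance, which invokes the (possibly non-public) no-arg constructor and, if the object implements Configurable, passes it the supplied Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class NewInstanceSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Resolve a Writable implementation from configuration; "example.value.class" is a
    // made-up key for this sketch, and Text is used as the default.
    Class<? extends Writable> valueClass =
        conf.getClass("example.value.class", Text.class, Writable.class);
    // newInstance calls the no-arg constructor and, if the instance implements
    // Configurable, hands it the Configuration before returning it.
    Writable value = ReflectionUtils.newInstance(valueClass, conf);
    System.out.println("Created an instance of " + value.getClass().getName());
  }
}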

Example 1: testCustomOffsets

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
public void testCustomOffsets() {
  Configuration conf = new Configuration();
  BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 }); 
  BinaryComparable key2 = new BytesWritable(new byte[] { 6, 2, 3, 7, 8 });
  
  BinaryPartitioner.setOffsets(conf, 1, -3);
  BinaryPartitioner<?> partitioner = 
    ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  int partition1 = partitioner.getPartition(key1, null, 10);
  int partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
  
  BinaryPartitioner.setOffsets(conf, 1, 2);
  partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  partition1 = partitioner.getPartition(key1, null, 10);
  partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
  
  BinaryPartitioner.setOffsets(conf, -4, -3);
  partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  partition1 = partitioner.getPartition(key1, null, 10);
  partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
}
 
Developer: naver, Project: hadoop, Lines of code: 25, Source file: TestBinaryPartitioner.java

Example 2: setup

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Override
protected void setup(Context context)
    throws IOException, InterruptedException {
  this.conf = context.getConfiguration();
  this.lobLoader = new LargeObjectLoader(this.conf,
      new Path(this.conf.get("sqoop.hbase.lob.extern.dir",
          "/tmp/sqoop-hbase-" + context.getTaskAttemptID())));

  // Get the implementation of PutTransformer to use.
  // By default, we call toString() on every non-null field.
  Class<? extends PutTransformer> xformerClass =
      (Class<? extends PutTransformer>)
      this.conf.getClass(TRANSFORMER_CLASS_KEY, ToStringPutTransformer.class);
  this.putTransformer = (PutTransformer)
      ReflectionUtils.newInstance(xformerClass, this.conf);
  if (null == putTransformer) {
    throw new RuntimeException("Could not instantiate PutTransformer.");
  }
  this.putTransformer.setColumnFamily(conf.get(COL_FAMILY_KEY, null));
  this.putTransformer.setRowKeyColumn(conf.get(ROW_KEY_COLUMN_KEY, null));
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 20, Source file: HBaseBulkImportMapper.java

Example 3: readFields

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
  super.readFields(in);
  
  // Read the number of entries in the map
  
  int entries = in.readInt();
  
  // Then read each key/value pair
  
  for (int i = 0; i < entries; i++) {
    K key =
      (K) ReflectionUtils.newInstance(getClass(
          in.readByte()), getConf());
    
    key.readFields(in);
    
    Writable value = (Writable) ReflectionUtils.newInstance(getClass(
        in.readByte()), getConf());
    
    value.readFields(in);
    instance.put(key, value);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 26, Source file: SortedMapWritable.java

Example 4: configure

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Override // Mapper
public void configure(JobConf conf) {
  super.configure(conf);

  // grab compression
  String compression = getConf().get("test.io.compression.class", null);
  Class<? extends CompressionCodec> codec;

  // try to initialize codec
  try {
    codec = (compression == null) ? null : 
      Class.forName(compression).asSubclass(CompressionCodec.class);
  } catch(Exception e) {
    throw new RuntimeException("Compression codec not found: ", e);
  }

  if(codec != null) {
    compressionCodec = (CompressionCodec)
        ReflectionUtils.newInstance(codec, getConf());
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source file: TestDFSIO.java

Example 5: testWritable

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
/** Utility method for testing writables. */
public static Writable testWritable(Writable before, Configuration conf)
    throws Exception {
  DataOutputBuffer dob = new DataOutputBuffer();
  before.write(dob);

  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), dob.getLength());
  
  Writable after = (Writable) ReflectionUtils.newInstance(
      before.getClass(), conf);
  after.readFields(dib);

  assertEquals(before, after);
  return after;
}
 
Developer: naver, Project: hadoop, Lines of code: 17, Source file: TestWritable.java

Example 6: readFields

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  // First clear the map. Otherwise we will just accumulate
  // entries every time this method is called.
  this.instance.clear();
  // Read the number of entries in the map
  int entries = in.readInt();
  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    byte[] key = Bytes.readByteArray(in);
    byte id = in.readByte();
    Class clazz = getClass(id);
    V value = null;
    if (clazz.equals(byte[].class)) {
      byte[] bytes = Bytes.readByteArray(in);
      value = (V) bytes;
    } else {
      Writable w = (Writable) ReflectionUtils.newInstance(clazz, getConf());
      w.readFields(in);
      value = (V) w;
    }
    this.instance.put(key, value);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 25, Source file: IndexFile.java

Example 7: writeBadOutput

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
 
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: TestRecovery.java

Example 8: writeToQueue

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@SuppressWarnings("unchecked")
private void writeToQueue(KEYOUT key, VALUEOUT value) throws IOException,
    InterruptedException {
  this.keyout = (KEYOUT) ReflectionUtils.newInstance(keyClass, conf);
  this.valueout = (VALUEOUT) ReflectionUtils.newInstance(valueClass, conf);
  ReflectionUtils.copy(conf, key, this.keyout);
  ReflectionUtils.copy(conf, value, this.valueout);

  // wait to write output to queue
  outputQueue.enqueue(new KeyValuePair<KEYOUT, VALUEOUT>(keyout, valueout));
}
 
Developer: naver, Project: hadoop, Lines of code: 12, Source file: Chain.java

Example 9: combineAndSpill

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator = 
    (RawComparator<K>)job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 23, Source file: MergeManagerImpl.java

Example 10: newInstance

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
/** Create a new instance of a class with a defined factory. */
public static Writable newInstance(Class<? extends Writable> c, Configuration conf) {
  WritableFactory factory = WritableFactories.getFactory(c);
  if (factory != null) {
    Writable result = factory.newInstance();
    if (result instanceof Configurable) {
      ((Configurable) result).setConf(conf);
    }
    return result;
  } else {
    return ReflectionUtils.newInstance(c, conf);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 14, Source file: WritableFactories.java

Example 11: decodeIdentifier

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
/**
 * Get the token identifier object, or null if it could not be constructed
 * (because the class could not be loaded, for example).
 * @return the token identifier, or null
 * @throws IOException 
 */
@SuppressWarnings("unchecked")
public T decodeIdentifier() throws IOException {
  Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
  if (cls == null) {
    return null;
  }
  TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
  ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
  DataInputStream in = new DataInputStream(buf);  
  tokenIdentifier.readFields(in);
  in.close();
  return (T) tokenIdentifier;
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 20, Source file: Token.java

Example 12: createDistributedHBaseCluster

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
public void createDistributedHBaseCluster() throws IOException {
  Configuration conf = getConfiguration();
  Class<? extends ClusterManager> clusterManagerClass = conf.getClass(HBASE_CLUSTER_MANAGER_CLASS,
    DEFAULT_HBASE_CLUSTER_MANAGER_CLASS, ClusterManager.class);
  ClusterManager clusterManager = ReflectionUtils.newInstance(
    clusterManagerClass, conf);
  setHBaseCluster(new DistributedHBaseCluster(conf, clusterManager));
  getHBaseAdmin();
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 10, Source file: IntegrationTestingUtility.java

Example 13: createOutputReader

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
OutputReader createOutputReader(Class<? extends OutputReader> outputReaderClass) 
  throws IOException {
  OutputReader outputReader =
    ReflectionUtils.newInstance(outputReaderClass, job_);
  outputReader.initialize(this);
  return outputReader;
}
 
Developer: naver, Project: hadoop, Lines of code: 8, Source file: PipeMapRed.java

Example 14: testThreeUnbalancedVolumes

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Test(timeout=60000)
public void testThreeUnbalancedVolumes() throws Exception {
  @SuppressWarnings("unchecked")
  final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = 
      ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
  
  List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
  
  // First volume with 1MB free space
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
  
  // Second volume with 3MB free space, which is a difference of 2MB, more
  // than the threshold of 1MB.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
  
  // Third volume, again with 3MB free space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
  
  // We should alternate assigning between the two volumes with a lot of free
  // space.
  initPolicy(policy, 1.0f);
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));

  // All writes should be assigned to the volume with the least free space.
  initPolicy(policy, 0.0f);
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
}
 
Developer: naver, Project: hadoop, Lines of code: 37, Source file: TestAvailableSpaceVolumeChoosingPolicy.java

Example 15: testGzipCompatibility

import org.apache.hadoop.util.ReflectionUtils; // import the package/class the method depends on
@Test
public void testGzipCompatibility() throws IOException {
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info("seed: " + seed);

  DataOutputBuffer dflbuf = new DataOutputBuffer();
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  gzout.write(b);
  gzout.close();

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
  InputStream gzin = codec.createInputStream(gzbuf, decom);

  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
 
Developer: naver, Project: hadoop, Lines of code: 31, Source file: TestCodec.java


Note: The org.apache.hadoop.util.ReflectionUtils.newInstance examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.