Java ReflectionUtils.newInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.ReflectionUtils.newInstance. If you are wondering what ReflectionUtils.newInstance does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.ReflectionUtils.


The sections below present 15 code examples of the ReflectionUtils.newInstance method, sorted by popularity by default.
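
Before the project-specific examples, here is a minimal, self-contained sketch of the basic call pattern. It is not taken from any of the projects below; NewInstanceDemo and MyComponent are hypothetical names used only for illustration. ReflectionUtils.newInstance(Class, Configuration) instantiates the class through its no-argument constructor and, if the resulting object implements Configurable, passes the supplied Configuration to setConf(); the resolved constructor is cached, so repeated instantiation of the same class stays cheap.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;

public class NewInstanceDemo {

  // Hypothetical Configurable component, used only to illustrate the call pattern.
  public static class MyComponent extends Configured {
    public String describe() {
      return "fs.defaultFS = " + getConf().get("fs.defaultFS", "<unset>");
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // newInstance invokes the no-arg constructor and, because Configured
    // implements Configurable, injects conf via setConf().
    MyComponent component = ReflectionUtils.newInstance(MyComponent.class, conf);
    System.out.println(component.describe());
  }
}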

Example 1: testCustomOffsets

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
public void testCustomOffsets() {
  Configuration conf = new Configuration();
  BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 }); 
  BinaryComparable key2 = new BytesWritable(new byte[] { 6, 2, 3, 7, 8 });
  
  BinaryPartitioner.setOffsets(conf, 1, -3);
  BinaryPartitioner<?> partitioner = 
    ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  int partition1 = partitioner.getPartition(key1, null, 10);
  int partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
  
  BinaryPartitioner.setOffsets(conf, 1, 2);
  partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  partition1 = partitioner.getPartition(key1, null, 10);
  partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
  
  BinaryPartitioner.setOffsets(conf, -4, -3);
  partitioner = ReflectionUtils.newInstance(BinaryPartitioner.class, conf);
  partition1 = partitioner.getPartition(key1, null, 10);
  partition2 = partitioner.getPartition(key2, null, 10);
  assertEquals(partition1, partition2);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: TestBinaryPartitioner.java

Example 2: setup

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@Override
protected void setup(Context context)
    throws IOException, InterruptedException {
  this.conf = context.getConfiguration();
  this.lobLoader = new LargeObjectLoader(this.conf,
      new Path(this.conf.get("sqoop.hbase.lob.extern.dir",
          "/tmp/sqoop-hbase-" + context.getTaskAttemptID())));

  // Get the implementation of PutTransformer to use.
  // By default, we call toString() on every non-null field.
  Class<? extends PutTransformer> xformerClass =
      (Class<? extends PutTransformer>)
      this.conf.getClass(TRANSFORMER_CLASS_KEY, ToStringPutTransformer.class);
  this.putTransformer = (PutTransformer)
      ReflectionUtils.newInstance(xformerClass, this.conf);
  if (null == putTransformer) {
    throw new RuntimeException("Could not instantiate PutTransformer.");
  }
  this.putTransformer.setColumnFamily(conf.get(COL_FAMILY_KEY, null));
  this.putTransformer.setRowKeyColumn(conf.get(ROW_KEY_COLUMN_KEY, null));
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 20, Source: HBaseBulkImportMapper.java

Example 3: readFields

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
  super.readFields(in);
  
  // Read the number of entries in the map
  int entries = in.readInt();

  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    K key = (K) ReflectionUtils.newInstance(getClass(in.readByte()), getConf());
    key.readFields(in);

    Writable value =
        (Writable) ReflectionUtils.newInstance(getClass(in.readByte()), getConf());
    value.readFields(in);

    instance.put(key, value);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 26, Source: SortedMapWritable.java

Example 4: configure

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@Override // Mapper
public void configure(JobConf conf) {
  super.configure(conf);

  // grab compression
  String compression = getConf().get("test.io.compression.class", null);
  Class<? extends CompressionCodec> codec;

  // try to initialize codec
  try {
    codec = (compression == null) ? null : 
      Class.forName(compression).asSubclass(CompressionCodec.class);
  } catch(Exception e) {
    throw new RuntimeException("Compression codec not found: ", e);
  }

  if(codec != null) {
    compressionCodec = (CompressionCodec)
        ReflectionUtils.newInstance(codec, getConf());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source: TestDFSIO.java

Example 5: testWritable

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
/** Utility method for testing writables. */
public static Writable testWritable(Writable before,
    Configuration conf) throws Exception {
  DataOutputBuffer dob = new DataOutputBuffer();
  before.write(dob);

  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), dob.getLength());
  
  Writable after = (Writable) ReflectionUtils.newInstance(
      before.getClass(), conf);
  after.readFields(dib);

  assertEquals(before, after);
  return after;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source: TestWritable.java

Example 6: readFields

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  // First clear the map. Otherwise we will just accumulate
  // entries every time this method is called.
  this.instance.clear();
  // Read the number of entries in the map
  int entries = in.readInt();
  // Then read each key/value pair
  for (int i = 0; i < entries; i++) {
    byte[] key = Bytes.readByteArray(in);
    byte id = in.readByte();
    Class clazz = getClass(id);
    V value = null;
    if (clazz.equals(byte[].class)) {
      byte[] bytes = Bytes.readByteArray(in);
      value = (V) bytes;
    } else {
      Writable w = (Writable) ReflectionUtils.newInstance(clazz, getConf());
      w.readFields(in);
      value = (V) w;
    }
    this.instance.put(key, value);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: IndexFile.java

Example 7: writeBadOutput

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
  throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
      TypeConverter.fromYarn(attempt.getID()));
 
  TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat
      .getRecordWriter(tContext);
  
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key2, val2);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val2);
    theRecordWriter.write(nullWritable, val1);
    theRecordWriter.write(key1, nullWritable);
    theRecordWriter.write(key2, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key1, val1);
  } finally {
    theRecordWriter.close(tContext);
  }
  
  OutputFormat outputFormat = ReflectionUtils.newInstance(
      tContext.getOutputFormatClass(), conf);
  OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
  committer.commitTask(tContext);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source: TestRecovery.java

Example 8: writeToQueue

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@SuppressWarnings("unchecked")
private void writeToQueue(KEYOUT key, VALUEOUT value) throws IOException,
    InterruptedException {
  this.keyout = (KEYOUT) ReflectionUtils.newInstance(keyClass, conf);
  this.valueout = (VALUEOUT) ReflectionUtils.newInstance(valueClass, conf);
  ReflectionUtils.copy(conf, key, this.keyout);
  ReflectionUtils.copy(conf, value, this.valueout);

  // wait to write output to queue
  outputQueue.enqueue(new KeyValuePair<KEYOUT, VALUEOUT>(keyout, valueout));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 12, Source: Chain.java

Example 9: combineAndSpill

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator = 
    (RawComparator<K>)job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: MergeManagerImpl.java

Example 10: newInstance

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
/** Create a new instance of a class with a defined factory. */
public static Writable newInstance(Class<? extends Writable> c, Configuration conf) {
  WritableFactory factory = WritableFactories.getFactory(c);
  if (factory != null) {
    Writable result = factory.newInstance();
    if (result instanceof Configurable) {
      ((Configurable) result).setConf(conf);
    }
    return result;
  } else {
    return ReflectionUtils.newInstance(c, conf);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source: WritableFactories.java

Example 11: decodeIdentifier

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
/**
 * Get the token identifier object, or null if it could not be constructed
 * (because the class could not be loaded, for example).
 * @return the token identifier, or null
 * @throws IOException 
 */
@SuppressWarnings("unchecked")
public T decodeIdentifier() throws IOException {
  Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
  if (cls == null) {
    return null;
  }
  TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
  ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
  DataInputStream in = new DataInputStream(buf);  
  tokenIdentifier.readFields(in);
  in.close();
  return (T) tokenIdentifier;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 20, Source: Token.java

Example 12: createDistributedHBaseCluster

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
public void createDistributedHBaseCluster() throws IOException {
  Configuration conf = getConfiguration();
  Class<? extends ClusterManager> clusterManagerClass = conf.getClass(HBASE_CLUSTER_MANAGER_CLASS,
    DEFAULT_HBASE_CLUSTER_MANAGER_CLASS, ClusterManager.class);
  ClusterManager clusterManager = ReflectionUtils.newInstance(
    clusterManagerClass, conf);
  setHBaseCluster(new DistributedHBaseCluster(conf, clusterManager));
  getHBaseAdmin();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 10, Source: IntegrationTestingUtility.java

Example 13: createOutputReader

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
OutputReader createOutputReader(Class<? extends OutputReader> outputReaderClass) 
  throws IOException {
  OutputReader outputReader =
    ReflectionUtils.newInstance(outputReaderClass, job_);
  outputReader.initialize(this);
  return outputReader;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: PipeMapRed.java

Example 14: testThreeUnbalancedVolumes

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@Test(timeout=60000)
public void testThreeUnbalancedVolumes() throws Exception {
  @SuppressWarnings("unchecked")
  final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = 
      ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
  
  List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
  
  // First volume with 1MB free space
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
  
  // Second volume with 3MB free space, which is a difference of 2MB, more
  // than the threshold of 1MB.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
  
  // Third volume, again with 3MB free space.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
  
  // We should alternate assigning between the two volumes with a lot of free
  // space.
  initPolicy(policy, 1.0f);
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));

  // All writes should be assigned to the volume with the least free space.
  initPolicy(policy, 0.0f);
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source: TestAvailableSpaceVolumeChoosingPolicy.java

Example 15: testGzipCompatibility

import org.apache.hadoop.util.ReflectionUtils; // import the package/class required by this method
@Test
public void testGzipCompatibility() throws IOException {
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info("seed: " + seed);

  DataOutputBuffer dflbuf = new DataOutputBuffer();
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  gzout.write(b);
  gzout.close();

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
  InputStream gzin = codec.createInputStream(gzbuf, decom);

  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 31, Source: TestCodec.java


Note: The org.apache.hadoop.util.ReflectionUtils.newInstance examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.