

Java ShortWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.ShortWritable. If you are wondering what ShortWritable is for, how to use it, or where it appears in real projects, the curated examples below should help.


The ShortWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, ordered by popularity.
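Before the examples, a minimal self-contained sketch of the class itself may be useful: ShortWritable is a thin Writable wrapper around a Java short, exposing get()/set() accessors plus the standard write()/readFields() serialization hooks. The demo class name below is ours; the API calls are the standard Hadoop ones.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.io.ShortWritable;

public class ShortWritableDemo {
  public static void main(String[] args) throws Exception {
    // Wrap a Java short in a ShortWritable.
    ShortWritable original = new ShortWritable((short) 42);

    // Serialize it using the Writable protocol.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize into a fresh instance and read the value back.
    ShortWritable copy = new ShortWritable();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy.get()); // prints 42
  }
}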

Example 1: getWritableInstance

import org.apache.hadoop.io.ShortWritable; // import the required package/class
/**
 * Returns the corresponding Writable object for this column type.
 */
public Writable getWritableInstance(com.cloudera.recordservice.core.Schema.Type type) {
  switch (type) {
    case BOOLEAN: return new BooleanWritable();
    case TINYINT: return new ByteWritable();
    case SMALLINT: return new ShortWritable();
    case INT: return new IntWritable();
    case BIGINT: return new LongWritable();
    case FLOAT: return new FloatWritable();
    case DOUBLE: return new DoubleWritable();
    case VARCHAR:
    case CHAR:
    case STRING: return new Text();
    case TIMESTAMP_NANOS: return new TimestampNanosWritable();
    case DECIMAL: return new DecimalWritable();
    default: throw new UnsupportedOperationException(
        "Unexpected type: " + type);
  }
}
 
Author: cloudera, Project: RecordServiceClient, Lines: 22, Source: RecordServiceRecord.java
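This factory pairs naturally with Example 12 below, which reuses the allocated Writables across records. A hedged sketch of a plausible caller (the names schema and columnVals are ours; the cols and type.typeId accessors follow Example 12):

// Hypothetical caller: pre-allocate one reusable Writable per column.
Writable[] columnVals = new Writable[schema.cols.size()];
for (int i = 0; i < schema.cols.size(); ++i) {
  columnVals[i] = getWritableInstance(schema.cols.get(i).type.typeId);
}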

Example 2: compareTo

import org.apache.hadoop.io.ShortWritable; // import the required package/class
public int compareTo(ShortArrayWritable that) {
	Writable[] self = this.get();
	Writable[] other = that.get();

	if (self.length != other.length) {
		// Length decides first.
		return Integer.compare(self.length, other.length);
	} else {
		// Then, compare every pair of elements.
		for (int i = 0; i < self.length; i++) {
			short s = ((ShortWritable) self[i]).get();
			short o = ((ShortWritable) other[i]).get();
			if (s != o) return Short.compare(s, o);
		}
		// Same length, same elements => same array.
		return 0;
	}
}
 
Author: yasserglez, Project: pagerank-hadoop, Lines: 19, Source: ShortArrayWritable.java
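A short hedged sketch of how two arrays compare under this ordering, assuming the no-arg constructor from Example 15 and the set() method inherited from ArrayWritable:

ShortArrayWritable a = new ShortArrayWritable();
a.set(new Writable[] { new ShortWritable((short) 1), new ShortWritable((short) 5) });

ShortArrayWritable b = new ShortArrayWritable();
b.set(new Writable[] { new ShortWritable((short) 1), new ShortWritable((short) 9) });

// Equal lengths, so elements are compared pairwise: 1 == 1, then 5 < 9.
assert a.compareTo(b) < 0;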

Example 3: reduce

import org.apache.hadoop.io.ShortWritable; // import the required package/class
@Override
protected void reduce(ShortWritable inKey,
		Iterable<FloatArrayWritable> inValues, Context context)
		throws IOException, InterruptedException {

	// This task sums all the partial results for one stripe of the vector
	// v_k and adds the teleportation factor.

	Configuration conf = context.getConfiguration();
	int numPages = Integer.parseInt(conf.get("pagerank.num_pages"));
	float beta = Float.parseFloat(conf.get("pagerank.damping_factor"));

	FloatWritable[] vi = null;

	for (FloatArrayWritable inValue : inValues) {
		Writable[] partialVi = inValue.get();

		if (vi == null) {
			// vi is initialized here in order to know the correct size of
			// the stripe (the last stripe can be incomplete).
			vi = new FloatWritable[partialVi.length];
			for (int k = 0; k < vi.length; k++) {
				vi[k] = new FloatWritable(0);
			}
		}

		// Sum the partial results.
		for (int k = 0; k < vi.length; k++) {
			vi[k].set(vi[k].get() + ((FloatWritable) partialVi[k]).get());
		}
	}

	// Add the teleportation factor.
	for (int k = 0; k < vi.length; k++) {
		vi[k].set(beta * vi[k].get() + (1 - beta) / numPages);
	}

	context.write(inKey, new FloatArrayWritable(vi));
}
 
Author: yasserglez, Project: pagerank-hadoop, Lines: 40, Source: PageRankIterationReducer.java

Example 4: pageRankIteration

import org.apache.hadoop.io.ShortWritable; // import the required package/class
private void pageRankIteration(int iter, Configuration conf, Path outputDir)
		throws Exception {

	// This job performs an iteration of the power iteration method to
	// compute PageRank. The map task processes each block M_{i,j}, loads
	// the corresponding stripe j of the vector v_{k-1} and produces the
	// partial result of the stripe i of the vector v_k. The reduce task
	// sums all the partial results of v_k and adds the teleportation factor
	// (the combiner only sums all the partial results). See Section 5.2
	// (and 5.2.3 in particular) of Mining of Massive Datasets
	// (http://infolab.stanford.edu/~ullman/mmds.html) for details. The
	// output is written in a "vk" subdir of the output dir, where k is the
	// iteration number. MapFileOutputFormat is used to keep an array of the
	// stripes of v.

	Job job = Job.getInstance(conf, "PageRank:Iteration");

	job.setJarByClass(PageRank.class);
	job.setInputFormatClass(SequenceFileInputFormat.class);
	job.setMapperClass(PageRankIterationMapper.class);
	job.setMapOutputKeyClass(ShortWritable.class);
	job.setMapOutputValueClass(FloatArrayWritable.class);
	job.setCombinerClass(PageRankIterationCombiner.class);
	job.setReducerClass(PageRankIterationReducer.class);
	job.setOutputFormatClass(MapFileOutputFormat.class);
	job.setOutputKeyClass(ShortWritable.class);
	job.setOutputValueClass(FloatArrayWritable.class);
	FileInputFormat.addInputPath(job, new Path(outputDir, "M"));
	FileOutputFormat.setOutputPath(job, new Path(outputDir, "v" + iter));

	job.waitForCompletion(true);
}
 
Author: yasserglez, Project: pagerank-hadoop, Lines: 33, Source: PageRank.java
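Because each stripe of v_k is written to a MapFile keyed by its ShortWritable stripe index, a later step can fetch a single stripe without scanning the whole vector. A hedged sketch, assuming FloatArrayWritable has a no-arg constructor and that stripe keys are 1-based as in Example 5:

import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

// Look up one stripe of v_k from the output of iteration `iter`.
MapFile.Reader[] readers = MapFileOutputFormat.getReaders(
    new Path(outputDir, "v" + iter), conf);
ShortWritable stripeKey = new ShortWritable((short) 1);
FloatArrayWritable stripe = new FloatArrayWritable();
MapFileOutputFormat.getEntry(readers,
    new HashPartitioner<ShortWritable, FloatArrayWritable>(), stripeKey, stripe);
for (MapFile.Reader reader : readers) {
  reader.close();
}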

Example 5: map

import org.apache.hadoop.io.ShortWritable; // import the required package/class
@Override
public void map(ShortWritable inKey, FloatArrayWritable inValue,
		Context context) throws IOException, InterruptedException {

	Configuration conf = context.getConfiguration();
	short blockSize = Short.parseShort(conf.get("pagerank.block_size"));
	int topResults = Integer.parseInt(conf.get("pagerank.top_results"));

	Writable[] vStripe = inValue.get();
	for (int i = 0; i < vStripe.length; i++) {
		int page = 1 + (inKey.get() - 1) * blockSize + i;
		float pageRank = ((FloatWritable) vStripe[i]).get();

		// The elements in the queue are sorted (in non-decreasing order) by
		// PageRank. The queue is filled up until it contains topResults
		// elements. Then, a new element will be added only if its PageRank
		// is greater than the lowest PageRank in the queue. If the queue is
		// full and a new element is added, the one with the lowest PageRank
		// is removed from the queue.
		if (topN.size() < topResults || pageRank >= topN.peek().getKey()) {
			topN.add(new AbstractMap.SimpleEntry<Float, Integer>(pageRank, page));
			if (topN.size() > topResults) {
				topN.poll();
			}
		}
	}
}
 
Author: yasserglez, Project: pagerank-hadoop, Lines: 28, Source: PageRankTopNMapper.java
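The topN field itself is not part of the snippet; given how it is used (peek() returns the lowest PageRank, poll() evicts it), it is presumably a min-heap ordered by score. A hedged sketch of a matching declaration:

// Min-heap on the PageRank score (the entry key), so peek()/poll()
// always touch the lowest-ranked element.
private final PriorityQueue<Map.Entry<Float, Integer>> topN =
    new PriorityQueue<Map.Entry<Float, Integer>>(11,
        new Comparator<Map.Entry<Float, Integer>>() {
          @Override
          public int compare(Map.Entry<Float, Integer> x, Map.Entry<Float, Integer> y) {
            return Float.compare(x.getKey(), y.getKey());
          }
        });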

Example 6: reduce

import org.apache.hadoop.io.ShortWritable; // import the required package/class
@Override
protected void reduce(ShortWritable inKey,
		Iterable<FloatArrayWritable> inValues, Context context)
		throws IOException, InterruptedException {

	// This task sums all the partial results for one stripe of the vector
	// v_k. It is a separate class since PageRankIterationReducer also adds
	// the teleportation factor.

	FloatWritable[] vi = null;

	for (FloatArrayWritable inValue : inValues) {
		Writable[] partialVi = inValue.get();

		if (vi == null) {
			// vi is initialized here in order to know the correct size of
			// the stripe (the last stripe can be incomplete).
			vi = new FloatWritable[partialVi.length];
			for (int k = 0; k < vi.length; k++) {
				vi[k] = new FloatWritable(0);
			}
		}

		// Sum the partial results.
		for (int k = 0; k < vi.length; k++) {
			vi[k].set(vi[k].get() + ((FloatWritable) partialVi[k]).get());
		}
	}

	context.write(inKey, new FloatArrayWritable(vi));
}
 
Author: yasserglez, Project: pagerank-hadoop, Lines: 32, Source: PageRankIterationCombiner.java

Example 7: setupMapper

import org.apache.hadoop.io.ShortWritable; // import the required package/class
private void setupMapper(String intermediateTable) throws IOException {
    // FileInputFormat.setInputPaths(job, input);

    String[] dbTableNames = HadoopUtil.parseHiveTableName(intermediateTable);
    HCatInputFormat.setInput(job, dbTableNames[0], dbTableNames[1]);

    job.setInputFormatClass(HCatInputFormat.class);
    job.setMapperClass(FactDistinctColumnsMapper.class);
    job.setCombinerClass(FactDistinctColumnsCombiner.class);
    job.setMapOutputKeyClass(ShortWritable.class);
    job.setMapOutputValueClass(Text.class);
}
 
Author: KylinOLAP, Project: Kylin, Lines: 14, Source: FactDistinctColumnsJob.java

Example 8: writeObject

import org.apache.hadoop.io.ShortWritable; // import the required package/class
/**
 * Writes an {@link Object} to the {@link DataOutput}.
 * 
 * @param obj
 *            the object to write.
 * @param out
 *            the data output stream.
 * @throws IOException
 *             if an I/O error occurs.
 */
public static final void writeObject(Object obj, DataOutput out)
		throws IOException {
	try {
		if (obj == null) {
			throw new IOException("Cannot write a null object.");
		} else if (ClassUtils.isBoolean(obj)) {
			(new BooleanWritable((boolean) obj)).write(out);
		} else if (ClassUtils.isByte(obj)) {
			(new ByteWritable((byte) obj)).write(out);
		} else if (ClassUtils.isShort(obj)) {
			(new ShortWritable((short) obj)).write(out);
		} else if (ClassUtils.isInteger(obj)) {
			(new IntWritable((int) obj)).write(out);
		} else if (ClassUtils.isLong(obj)) {
			(new LongWritable((long) obj)).write(out);
		} else if (ClassUtils.isFloat(obj)) {
			(new FloatWritable((float) obj)).write(out);
		} else if (ClassUtils.isDouble(obj)) {
			(new DoubleWritable((double) obj)).write(out);
		} else if (ClassUtils.isString(obj)) {
			Text.writeString(out, (String) obj);
		} else if (ClassUtils.isEnum(obj)) {
			(new IntWritable(((Enum<?>) obj).ordinal())).write(out);
		} else if (ClassUtils.isArray(obj)) {
			int length = Array.getLength(obj);
			writeObject(length, out);
			for (int j = 0; j < length; j++) {
				writeObject(Array.get(obj, j), out);
			}
		} else {
			((Writable) obj).write(out);
		}
	} catch (IllegalArgumentException exc) {
		throw new IOException(exc);
	}
}
 
Author: SHAF-WORK, Project: shaf, Lines: 47, Source: IOUtils.java

Example 9: readShort

import org.apache.hadoop.io.ShortWritable; // import the required package/class
/** read short value */
static short readShort(DataInput in) throws IOException {
  ShortWritable uShort = TL_DATA.get().U_SHORT;
  uShort.readFields(in);
  return uShort.get();
}
 
Author: naver, Project: hadoop, Lines: 7, Source: FSImageSerialization.java

Example 10: writeShort

import org.apache.hadoop.io.ShortWritable; // import the required package/class
/** write short value */
static void writeShort(short value, DataOutputStream out) throws IOException {
  ShortWritable uShort = TL_DATA.get().U_SHORT;
  uShort.set(value);
  uShort.write(out);
}
 
Author: naver, Project: hadoop, Lines: 7, Source: FSImageSerialization.java
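Both helpers reuse a per-thread ShortWritable rather than allocating one per call, since these serialization paths are hot. A rough sketch of the thread-local holder that the U_SHORT field implies (the real TLData in FSImageSerialization carries several more reusable Writables):

// Per-thread scratch Writables, so readShort/writeShort allocate nothing.
private static final ThreadLocal<TLData> TL_DATA = new ThreadLocal<TLData>() {
  @Override
  protected TLData initialValue() {
    return new TLData();
  }
};

private static final class TLData {
  final ShortWritable U_SHORT = new ShortWritable();
}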

Example 11: getPrimitiveWritableObject

import org.apache.hadoop.io.ShortWritable; // import the required package/class
@Override
public ShortWritable getPrimitiveWritableObject(Object o) {
	return o == null ? null : new ShortWritable((Short)o);
}
 
Author: mini666, Project: hive-phoenix-handler, Lines: 5, Source: PhoenixShortObjectInspector.java

Example 12: reset

import org.apache.hadoop.io.ShortWritable; // import the required package/class
/**
 * Resets this RecordServiceRecord by translating the column data from the
 * given Record into the internal array of Writables (columnVals_).
 * The schemas are expected to match; only minimal error checks are performed.
 * This is a performance-critical method.
 */
public void reset(Record record) {
  if (record.getSchema().cols.size() != schema_.getNumColumns()) {
    throw new IllegalArgumentException(String.format("Schema for new record does " +
      "not match existing schema: %d (new) != %d (existing)",
      record.getSchema().cols.size(), schema_.getNumColumns()));
  }

  for (int i = 0; i < schema_.getNumColumns(); ++i) {
    if (record.isNull(i)) {
      columnVals_[i] = null;
      continue;
    }
    columnVals_[i] = columnValObjects_[i];
    com.cloudera.recordservice.core.Schema.ColumnDesc cInfo = schema_.getColumnInfo(i);
    Preconditions.checkNotNull(cInfo);
    switch (cInfo.type.typeId) {
      case BOOLEAN:
        ((BooleanWritable) columnValObjects_[i]).set(record.nextBoolean(i));
        break;
      case TINYINT:
        ((ByteWritable) columnValObjects_[i]).set(record.nextByte(i));
        break;
      case SMALLINT:
        ((ShortWritable) columnValObjects_[i]).set(record.nextShort(i));
        break;
      case INT:
        ((IntWritable) columnValObjects_[i]).set(record.nextInt(i));
        break;
      case BIGINT:
        ((LongWritable) columnValObjects_[i]).set(record.nextLong(i));
        break;
      case FLOAT:
        ((FloatWritable) columnValObjects_[i]).set(record.nextFloat(i));
        break;
      case DOUBLE:
        ((DoubleWritable) columnValObjects_[i]).set(record.nextDouble(i));
        break;

      case STRING:
      case VARCHAR:
      case CHAR:
        ByteArray s = record.nextByteArray(i);
        ((Text) columnValObjects_[i]).set(
            s.byteBuffer().array(), s.offset(), s.len());
        break;
      case TIMESTAMP_NANOS:
        ((TimestampNanosWritable) columnValObjects_[i]).set(
            record.nextTimestampNanos(i));
        break;
      case DECIMAL:
        ((DecimalWritable) columnValObjects_[i]).set(
            record.nextDecimal(i));
        break;
      default:
        throw new RuntimeException("Unsupported type: " + cInfo);
    }
  }
}
 
Author: cloudera, Project: RecordServiceClient, Lines: 66, Source: RecordServiceRecord.java

Example 13: testReadAllTypes

import org.apache.hadoop.io.ShortWritable; // import the required package/class
@Test
public void testReadAllTypes() throws IOException, InterruptedException {
  Configuration config = new Configuration();
  RecordServiceInputFormat.RecordServiceRecordReader reader =
      new RecordServiceInputFormat.RecordServiceRecordReader();

  SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd");
  format.setTimeZone(TimeZone.getTimeZone("GMT"));

  try {
    RecordServiceConfig.setInputTable(config, null, "rs.alltypes");
    List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;

    int numRows = 0;
    for (InputSplit split: splits) {
      reader.initialize(split,
          new TaskAttemptContextImpl(new JobConf(config), new TaskAttemptID()));
      while (reader.nextKeyValue()) {
        RecordServiceRecord value = reader.getCurrentValue();
        if (((BooleanWritable)value.getColumnValue(0)).get()) {
          assertEquals(0, ((ByteWritable)value.getColumnValue(1)).get());
          assertEquals(1, ((ShortWritable)value.getColumnValue(2)).get());
          assertEquals(2, ((IntWritable)value.getColumnValue(3)).get());
          assertEquals(3, ((LongWritable)value.getColumnValue(4)).get());
          assertEquals(4.0, ((FloatWritable)value.getColumnValue(5)).get(), 0.1);
          assertEquals(5.0, ((DoubleWritable)value.getColumnValue(6)).get(), 0.1);
          assertEquals("hello", value.getColumnValue(7).toString());
          assertEquals("vchar1", value.getColumnValue(8).toString());
          assertEquals("char1", value.getColumnValue(9).toString());
          assertEquals("2015-01-01", format.format(
              ((TimestampNanosWritable)value.getColumnValue(10)).get().toTimeStamp()));
          assertEquals(
              new BigDecimal("3.1415920000"),
              ((DecimalWritable)value.getColumnValue(11)).get().toBigDecimal());
        } else {
          assertEquals(6, ((ByteWritable)value.getColumnValue(1)).get());
          assertEquals(7, ((ShortWritable)value.getColumnValue(2)).get());
          assertEquals(8, ((IntWritable)value.getColumnValue(3)).get());
          assertEquals(9, ((LongWritable)value.getColumnValue(4)).get());
          assertEquals(10.0, ((FloatWritable)value.getColumnValue(5)).get(), 0.1);
          assertEquals(11.0, ((DoubleWritable)value.getColumnValue(6)).get(), 0.1);
          assertEquals("world", value.getColumnValue(7).toString());
          assertEquals("vchar2", value.getColumnValue(8).toString());
          assertEquals("char2", value.getColumnValue(9).toString());
          assertEquals("2016-01-01",
              format.format(
                  ((TimestampNanosWritable)value.getColumnValue(10))
                      .get().toTimeStamp()));
          assertEquals(
              new BigDecimal("1234.5678900000"),
              ((DecimalWritable)value.getColumnValue(11)).get().toBigDecimal());
        }
        ++numRows;
      }
    }
    assertEquals(2, numRows);
  } finally {
    reader.close();
  }
}
 
Author: cloudera, Project: RecordServiceClient, Lines: 61, Source: MapReduceTest.java

Example 14: extractPigObject

import org.apache.hadoop.io.ShortWritable; // import the required package/class
/**
 * Converts an object from Hive's value system to Pig's value system;
 * see HCatBaseStorer#getJavaObj() for the Pig->Hive conversion.
 * @param o object from the Hive value system
 * @param itemType the column type descriptor
 * @return the equivalent object in the Pig value system
 */
public static Object extractPigObject(
    Object o, com.cloudera.recordservice.core.Schema.TypeDesc itemType)
    throws Exception {
  // Note that HCatRecordSerDe.serializePrimitiveField() will be called before this,
  // thus some type promotion/conversion may occur: e.g. Short to Integer. We should
  // refactor this so that it's happening in one place per module/product that we are
  // integrating with. All Pig conversion should be done here, etc.
  if(o == null) {
    return null;
  }
  Object result;
  switch (itemType.typeId) {
    case BOOLEAN:
      result = ((BooleanWritable) o).get();
      break;
    case TINYINT:
      result = ((ByteWritable) o).get();
      break;
    case SMALLINT:
      result = (int) ((ShortWritable) o).get();
      break;
    case INT:
      result = ((IntWritable) o).get();
      break;
    case BIGINT:
      result = ((LongWritable)o).get();
      break;
    case FLOAT:
      result = ((FloatWritable) o).get();
      break;
    case DOUBLE:
      result =  ((DoubleWritable) o).get();
      break;
    case STRING:
    case VARCHAR:
    case CHAR:
      result = o.toString();
      break;
    case TIMESTAMP_NANOS:
      TimestampNanos timestampNanos = ((TimestampNanosWritable) o).get();
      // TODO: make sure this is correct
      result = new DateTime(timestampNanos.toTimeStamp(),
          DateTimeZone.forTimeZone(TimeZone.getTimeZone("GMT")));
      break;
    case DECIMAL:
      Decimal decimal = ((DecimalWritable) o).get();
      result = decimal.toBigDecimal();
      break;
    default:
      result = o;
      break;
  }
  return result;
}
 
Author: cloudera, Project: RecordServiceClient, Lines: 61, Source: PigHCatUtil.java
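Note that the SMALLINT branch widens the short to an int: Pig's type system has no 16-bit integer, so int is the smallest lossless target. A hypothetical call (smallintType stands in for a SMALLINT TypeDesc; the name is ours):

// pigValue ends up as an Integer holding 7, not a Short.
Object pigValue = extractPigObject(new ShortWritable((short) 7), smallintType);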

Example 15: ShortArrayWritable

import org.apache.hadoop.io.ShortWritable; // import the required package/class
public ShortArrayWritable() {
	// ArrayWritable needs the element class so that readFields() can
	// instantiate ShortWritable elements during deserialization.
	super(ShortWritable.class);
}
 
Author: yasserglez, Project: pagerank-hadoop, Lines: 4, Source: ShortArrayWritable.java


Note: The org.apache.hadoop.io.ShortWritable examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use must follow the corresponding project's License. Do not republish without permission.