

Java LongWritable.set Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.LongWritable.set. If you are wondering what LongWritable.set does, how to call it, or where it is used in practice, the curated code examples below may help. You can also explore further usage examples of org.apache.hadoop.io.LongWritable, the class this method belongs to.


The following presents 13 code examples of the LongWritable.set method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
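Before diving into the examples, here is a minimal, self-contained sketch of the basic LongWritable.set pattern (the class name and values below are illustrative only, not taken from any of the projects cited later): LongWritable is a mutable wrapper around a primitive long, and set replaces the wrapped value in place so that a single instance can be reused across records.

import org.apache.hadoop.io.LongWritable;

public class LongWritableSetDemo {
    public static void main(String[] args) {
        // LongWritable is a mutable Writable wrapper around a primitive long.
        LongWritable key = new LongWritable();

        // set() overwrites the wrapped value in place, so the same object can
        // be reused for many records instead of allocating a new one each time.
        key.set(42L);
        System.out.println(key.get());   // prints 42

        key.set(System.currentTimeMillis());
        System.out.println(key.get());   // prints the current epoch milliseconds
    }
}

Most of the examples below follow this pattern: allocate one LongWritable, call set inside a loop, and write the object out before the next iteration overwrites its value.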

Example 1: writeInStoreHour

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
private void writeInStoreHour() throws IOException, InterruptedException {

    KeyWrapper cycleKey = new KeyWrapper();
    cycleKey.setType(new Text(MapKeyConfig.IN_STORE_HOUR));

    LongWritable longWritable = new LongWritable();
    cycleKey.setMillisTime(longWritable);

    IntWritable value = new IntWritable(1);

    List<Long> inStoreHours = statistic.getInStoreHours();
    for (Long inStoreTime : inStoreHours) {
        longWritable.set(IntervalCalculator.getInStoreInterval(inStoreTime));
        context.write(cycleKey, new ValueWrapper(value));
    }
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 18, Source: MapperWriter.java

Example 2: reduce

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
public void reduce(LongWritable key, Iterator<Text> values,
                   OutputCollector<LongWritable, Text> output,
                   Reporter reporter) throws IOException {
  while (values.hasNext()) {
    Text value = values.next();
    writeFlag(conf, "reduce." + name + ".value." + value);
    key.set(10);
    output.collect(key, value);
    if (byValue) {
      assertEquals(10, key.get());
    } else {
      assertNotSame(10, key.get());
    }
    key.set(11);
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestChainMapReduce.java

Example 3: next

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
private boolean next(LongWritable key, Text value) throws IOException {
    /**
     * Use readUntilMatch to search for the tag that opens an XML segment; only
     * once it is found do we start writing the XML fragment into the buffer.
     * If readUntilMatch's second argument is false, no data is written to the
     * buffer while searching; if it is true, data is written as the search
     * proceeds.
     */
    if (fsin.getPos() < end && readUntilMatch(startTag, false)) {
        // Entering this block means the start tag was found; fsin now points
        // at the last byte of that start tag, so write the start tag into the
        // buffer first.
        buffer.write(startTag);
        try {
            /**
             * Search fsin for the end tag, recording bytes as the search
             * proceeds, until the end tag is found.
             */
            if (readUntilMatch(endTag, true)) {
                /**
                 * Once the end tag is found, assign the byte offset of the
                 * start tag to key, and assign the complete XML fragment
                 * recorded in the buffer to value.
                 */
                key.set(fsin.getPos() - buffer.getLength());
                value.set(buffer.getData(), 0, buffer.getLength());
                return true;
            }
        } finally {
            buffer.reset();
        }
    }
    return false;
}
 
Developer: lzmhhh123, Project: Wikipedia-Index, Lines: 30, Source: XmlInputFormat.java

Example 4: write

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
public void write(DataOutput dataOutput) throws IOException {
    Text text = new Text(wifiProb == null ? "" : wifiProb);
    text.write(dataOutput);

    LongWritable longWritable = new LongWritable();
    longWritable.set(hour);
    longWritable.write(dataOutput);
    longWritable.set(newCustomer);
    longWritable.write(dataOutput);
    longWritable.set(oldCustomer);
    longWritable.write(dataOutput);
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 14, Source: NewOldCustomElement.java

Example 5: writNewOldCustomer

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
private void writNewOldCustomer() throws IOException, InterruptedException {

    KeyWrapper newOldKey = new KeyWrapper();
    newOldKey.setType(new Text(MapKeyConfig.NEW_OLD_CUSTOMER));

    LongWritable longWritable = new LongWritable();
    newOldKey.setMillisTime(longWritable);

    for (NewOldCustomElement newOldCustomElement : statistic.getNewOldCustomElements()) {
        longWritable.set(newOldCustomElement.getHour());
        context.write(newOldKey, new ValueWrapper(newOldCustomElement));
    }
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 14, Source: MapperWriter.java

Example 6: writCustomerFlow

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
private void writCustomerFlow() throws IOException, InterruptedException {

    KeyWrapper customerFlowKey = new KeyWrapper();
    customerFlowKey.setType(new Text(MapKeyConfig.CUSTOMER_FLOW_KEY));

    LongWritable longWritable = new LongWritable();
    customerFlowKey.setMillisTime(longWritable);

    for (CustomerFlowElement customerFlowElement : statistic.getCustomerFlowElements()) {
        longWritable.set(customerFlowElement.getHour());
        context.write(customerFlowKey, new ValueWrapper(customerFlowElement));
    }
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 14, Source: MapperWriter.java

Example 7: writeCycle

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
private void writeCycle() throws IOException, InterruptedException {

    KeyWrapper cycleKey = new KeyWrapper();
    cycleKey.setType(new Text(MapKeyConfig.CYCLE));

    LongWritable longWritable = new LongWritable();
    cycleKey.setMillisTime(longWritable);

    IntWritable value = new IntWritable(1);

    for (Long cycle : statistic.getCycles()) {
        longWritable.set(IntervalCalculator.getCycleInterval(cycle));
        context.write(cycleKey, new ValueWrapper(value));
    }
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 16, Source: MapperWriter.java

Example 8: getRecordReader

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
public RecordReader<LongWritable, Text> getRecordReader(
    InputSplit ignored, JobConf conf, Reporter reporter) throws IOException {

  return new RecordReader<LongWritable, Text>() {

    boolean sentOneRecord = false;

    public boolean next(LongWritable key, Text value)
        throws IOException {
      key.set(1);
      value.set("dummy");
      if (sentOneRecord == false) { // first call
        sentOneRecord = true;
        return true;
      }
      return false; // we have sent one record - we are done
    }

    public LongWritable createKey() {
      return new LongWritable();
    }
    public Text createValue() {
      return new Text();
    }
    public long getPos() throws IOException {
      return 1;
    }
    public void close() throws IOException {
    }
    public float getProgress() throws IOException {
      return 1;
    }
  };
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: LoadGeneratorMR.java

Example 9: map

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
public void map(LongWritable key, Text value,
                OutputCollector<LongWritable, Text> output,
                Reporter reporter) throws IOException {
  writeFlag(conf, "map." + name + ".value." + value);
  key.set(10);
  output.collect(key, value);
  if (byValue) {
    assertEquals(10, key.get());
  } else {
    assertNotSame(10, key.get());
  }
  key.set(11);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestChainMapReduce.java

Example 10: next

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
@Override
public synchronized boolean next(LongWritable key, BytesWritable value)
    throws IOException {
  boolean dataRead = reader.nextKeyValue();
  if (dataRead) {
    LongWritable newKey = reader.getCurrentKey();
    BytesWritable newValue = reader.getCurrentValue();
    key.set(newKey.get());
    value.set(newValue);
  }
  return dataRead;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: FixedLengthRecordReader.java

Example 11: next

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
/** Read a line. */
public synchronized boolean next(LongWritable key, Text value)
  throws IOException {

  // We always read one extra line, which lies outside the upper
  // split limit i.e. (end - 1)
  while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
    key.set(pos);

    int newSize = 0;
    if (pos == 0) {
      newSize = skipUtfByteOrderMark(value);
    } else {
      newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
      pos += newSize;
    }

    if (newSize == 0) {
      return false;
    }
    if (newSize < maxLineLength) {
      return true;
    }

    // line too long. try again
    LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize));
  }

  return false;
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: LineRecordReader.java

Example 12: transferToFully

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
/**
 * Transfers data from FileChannel using 
 * {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
 * Updates <code>waitForWritableTime</code> and <code>transferToTime</code>
 * with the time spent blocked on the network and the time spent transferring
 * data from disk to network respectively.
 * 
 * Similar to readFully(), this waits till requested amount of 
 * data is transfered.
 * 
 * @param fileCh FileChannel to transfer data from.
 * @param position position within the channel where the transfer begins
 * @param count number of bytes to transfer.
 * @param waitForWritableTime nanoseconds spent waiting for the socket 
 *        to become writable
 * @param transferTime nanoseconds spent transferring data
 * 
 * @throws EOFException 
 *         If end of input file is reached before requested number of 
 *         bytes are transfered.
 *
 * @throws SocketTimeoutException 
 *         If this channel blocks transfer longer than timeout for 
 *         this stream.
 *          
 * @throws IOException Includes any exception thrown by 
 *         {@link FileChannel#transferTo(long, long, WritableByteChannel)}. 
 */
public void transferToFully(FileChannel fileCh, long position, int count,
    LongWritable waitForWritableTime,
    LongWritable transferToTime) throws IOException {
  long waitTime = 0;
  long transferTime = 0;
  while (count > 0) {
    /* 
     * Ideally we should wait after transferTo returns 0. But because of
     * a bug in JRE on Linux (http://bugs.sun.com/view_bug.do?bug_id=5103988),
     * which throws an exception instead of returning 0, we wait for the
     * channel to be writable before writing to it. If you ever see 
     * IOException with message "Resource temporarily unavailable" 
     * thrown here, please let us know.
     * 
     * Once we move to JAVA SE 7, wait should be moved to correct place.
     */
    long start = System.nanoTime();
    waitForWritable();
    long wait = System.nanoTime();

    int nTransfered = (int) fileCh.transferTo(position, count, getChannel());
    
    if (nTransfered == 0) {
      //check if end of file is reached.
      if (position >= fileCh.size()) {
        throw new EOFException("EOF Reached. file size is " + fileCh.size() + 
                               " and " + count + " more bytes left to be " +
                               "transfered.");
      }
      //otherwise assume the socket is full.
      //waitForWritable(); // see comment above.
    } else if (nTransfered < 0) {
      throw new IOException("Unexpected return of " + nTransfered + 
                            " from transferTo()");
    } else {
      position += nTransfered;
      count -= nTransfered;
    }
    long transfer = System.nanoTime();
    waitTime += wait - start;
    transferTime += transfer - wait;
  }
  
  if (waitForWritableTime != null) {
    waitForWritableTime.set(waitTime);
  }
  if (transferToTime != null) {
    transferToTime.set(transferTime);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 79, Source: SocketOutputStream.java

Example 13: writeLong

import org.apache.hadoop.io.LongWritable; // import the package/class this method depends on
/** write the long value */
static void writeLong(long value, DataOutputStream out) throws IOException {
  LongWritable uLong = TL_DATA.get().U_LONG;
  uLong.set(value);
  uLong.write(out);
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: FSImageSerialization.java


Note: The org.apache.hadoop.io.LongWritable.set method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.