

Java Text Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.Text, drawn from open-source projects. If you are wondering what the Text class does, how to use it, or where it applies, the curated examples below should help.


The Text class belongs to the org.apache.hadoop.io package. The 15 code examples below are ordered by popularity by default.
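
Before the individual examples, here is a minimal, self-contained sketch of the Text basics they all rely on: construction, in-place mutation via set(), and byte-oriented length. The methods shown are standard Hadoop Text API; the enclosing class is illustrative only.

import org.apache.hadoop.io.Text;

public class TextBasics {
  public static void main(String[] args) {
    // Text is a mutable, UTF-8 encoded alternative to String for Hadoop I/O.
    Text t = new Text("hello");

    // set() reuses the internal byte buffer instead of allocating a new object.
    t.set("hello world");

    // getLength() counts UTF-8 bytes, not Java chars.
    System.out.println(t.getLength());  // 11
    System.out.println(t.toString());   // hello world

    // Text implements WritableComparable; instances order lexicographically by bytes.
    System.out.println(t.compareTo(new Text("hello")) > 0);  // true
  }
}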

Example 1: map

import org.apache.hadoop.io.Text; // import the required package/class
public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
  // Make one mapper slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_map =
          conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false);

  // IF TESTING MAPPER SPECULATIVE EXECUTION:
  //   Make the "*_m_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_m_000000_1", so sleepTime will still remain 100ms.
  if ( (taid.getTaskType() == TaskType.MAP) && test_speculate_map
        && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try {
    Thread.sleep(sleepTime);
  } catch (InterruptedException ie) {
    // Ignore
  }
  context.write(value, new IntWritable(1));
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestSpeculativeExecution.java
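
For context, the MRJobConfig.MAP_SPECULATIVE flag read by this mapper is normally set on the driver side. A minimal sketch of that setup, assuming the standard MapReduce job API (the class and job name are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SpeculationDemo {
  public static Job createJob() throws IOException {
    Configuration conf = new Configuration();
    // Enable speculative execution for map tasks; the mapper above reads this
    // flag via conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false).
    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
    return Job.getInstance(conf, "speculation-demo");
  }
}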

Example 2: testAddCreds

import org.apache.hadoop.io.Text; // import the required package/class
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddCreds() throws Exception {
  UserGroupInformation ugi = 
      UserGroupInformation.createRemoteUser("someone"); 
  
  Text service = new Text("service");
  Token<T> t1 = mock(Token.class);
  when(t1.getService()).thenReturn(service);
  Token<T> t2 = mock(Token.class);
  when(t2.getService()).thenReturn(new Text("service2"));
  byte[] secret = new byte[]{};
  Text secretKey = new Text("sshhh");

  // fill credentials
  Credentials creds = new Credentials();
  creds.addToken(t1.getService(), t1);
  creds.addToken(t2.getService(), t2);
  creds.addSecretKey(secretKey, secret);
  
  // add creds to ugi, and check ugi
  ugi.addCredentials(creds);
  checkTokens(ugi, t1, t2);
  assertSame(secret, ugi.getCredentials().getSecretKey(secretKey));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 26, Source: TestUserGroupInformation.java
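
Credentials assembled this way can also be persisted and restored. A short sketch of the file round-trip using the standard Credentials API (the path and alias are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;

public class CredentialsRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Credentials creds = new Credentials();
    // Secret keys are indexed by Text aliases, just like token services above.
    creds.addSecretKey(new Text("sshhh"), "secret-bytes".getBytes("UTF-8"));

    // Persist to a token file and read it back.
    Path tokenFile = new Path("/tmp/creds.token");
    creds.writeTokenStorageFile(tokenFile, conf);
    Credentials restored = Credentials.readTokenStorageFile(tokenFile, conf);
    System.out.println(restored.getSecretKey(new Text("sshhh")).length);
  }
}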

Example 3: map

import org.apache.hadoop.io.Text; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String doc = value.toString();
				
	String text = slice(doc, "<text", "</text>", true);
	if (text.length() < 1) return;
	
	// The text was lowercased above, so only 'a'..'z' can occur; replace every
	// non-letter character with a space before tokenizing.
	char txt[] = text.toLowerCase().toCharArray();
	for (int i = 0; i < txt.length; ++i) {
		if (!(txt[i] >= 'a' && txt[i] <= 'z'))
			txt[i] = ' ';
	}
	
	String id = slice(doc, "<id>", "</id>", false);
	if (id.length() < 1) return;
	StringTokenizer itr = new StringTokenizer(String.valueOf(txt));
	int sum = itr.countTokens();
	while (itr.hasMoreTokens()) {
		String s = itr.nextToken();
		word.set(id + '-' + s);
		IntWritable tmp[] = {new IntWritable(sum), new IntWritable(1)};
		IntArrayWritable temp = new IntArrayWritable(tmp);
		context.write(word, temp);
	}
}
 
Developer: lzmhhh123, Project: Wikipedia-Index, Lines: 26, Source: TF.java
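
IntArrayWritable is not part of Hadoop itself; the project presumably defines it as the usual ArrayWritable subclass. A minimal sketch of that pattern (the no-arg constructor is what Hadoop's reflection-based deserialization requires):

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;

public class IntArrayWritable extends ArrayWritable {
  // Required by Hadoop so the class can be instantiated during deserialization.
  public IntArrayWritable() {
    super(IntWritable.class);
  }

  public IntArrayWritable(IntWritable[] values) {
    super(IntWritable.class, values);
  }
}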

Example 4: write

import org.apache.hadoop.io.Text; // import the required package/class
@Override
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {

  output.writeInt(splitId);

  if (this.oracleDataChunks == null) {
    output.writeInt(0);
  } else {
    output.writeInt(this.oracleDataChunks.size());
    for (OraOopOracleDataChunk dataChunk : this.oracleDataChunks) {
      Text.writeString(output, dataChunk.getClass().getName());
      dataChunk.write(output);
    }
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 17, Source: OraOopDBInputSplit.java
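
On the read side, Text.writeString pairs with Text.readString. The following is a hedged sketch of what the matching readFields could look like, not the project's actual code; the reflective chunk construction in particular is an assumption:

import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.io.Text;

@Override
/** {@inheritDoc} */
public void readFields(DataInput input) throws IOException {

  this.splitId = input.readInt();

  int chunkCount = input.readInt();
  this.oracleDataChunks = new ArrayList<OraOopOracleDataChunk>(chunkCount);
  for (int i = 0; i < chunkCount; i++) {
    // Read the class name written by write(), then rebuild the chunk.
    String className = Text.readString(input);
    try {
      OraOopOracleDataChunk chunk =
          (OraOopOracleDataChunk) Class.forName(className).newInstance();
      chunk.readFields(input);
      this.oracleDataChunks.add(chunk);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}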

Example 5: reduce

import org.apache.hadoop.io.Text; // import the required package/class
/** Combines values for a given key.  
 * @param key the key is expected to be a Text object, whose prefix indicates
 * the type of aggregation to aggregate the values. 
 * @param values the values to combine
 * @param context to collect combined values
 */
public void reduce(Text key, Iterable<Text> values, Context context) 
    throws IOException, InterruptedException {
  String keyStr = key.toString();
  int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
  String type = keyStr.substring(0, pos);
  long uniqCount = context.getConfiguration().
    getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
  ValueAggregator aggregator = ValueAggregatorBaseDescriptor
    .generateValueAggregator(type, uniqCount);
  for (Text val : values) {
    aggregator.addNextValue(val);
  }
  Iterator<?> outputs = aggregator.getCombinerOutput().iterator();

  while (outputs.hasNext()) {
    Object v = outputs.next();
    if (v instanceof Text) {
      context.write(key, (Text)v);
    } else {
      context.write(key, new Text(v.toString()));
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: ValueAggregatorCombiner.java

Example 6: fetchColumnTest

import org.apache.hadoop.io.Text; // import the required package/class
private void fetchColumnTest(String configuration) throws Exception {
  List<String> rows = Arrays.asList("row1", "row2");
  List<String> colFs = Arrays.asList("colF1", "colF2");
  List<String> colQs = Arrays.asList("colQ1", "colQ2");
  List<String> colVs = Collections.singletonList("");
  List<String> values = Collections.singletonList("value");

  clearTable();
  EncryptedBatchWriter writer = getEncryptedWriter(CHARLIE, configuration);
  writeData(writer, rows, colFs, colQs, colVs, values);
  writer.close();

  EncryptedBatchScanner scanner = getEncryptedScanner(CHARLIE, configuration);
  scanner.setRanges(Collections.singletonList(new Range()));
  scanner.fetchColumn(new IteratorSetting.Column(new Text("colF1"), new Text("colQ1")));
  assertThat("contains the filtered data", scanner,
      hasData(rows, Collections.singletonList("colF1"),
          Collections.singletonList("colQ1"), colVs, values));
}
 
Developer: mit-ll, Project: PACE, Lines: 18, Source: FilteringIT.java

Example 7: testFsWithDuplicateChildrenTokenExists

import org.apache.hadoop.io.Text; // import the required package/class
@Test
public void testFsWithDuplicateChildrenTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs1");
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);

  MockFileSystem fs = createFileSystemForServiceName(service);
  MockFileSystem multiFs =
      createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
  
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs, false);
  
  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: TestFileSystemTokens.java

Example 8: selectToken

import org.apache.hadoop.io.Text; // import the required package/class
@SuppressWarnings("unchecked")
public Token<RMDelegationTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  LOG.debug("Looking for a token with service " + service.toString());
  for (Token<? extends TokenIdentifier> token : tokens) {
    LOG.debug("Token kind is " + token.getKind().toString()
        + " and the token's service name is " + token.getService());
    if (RMDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
        && checkService(service, token)) {
      return (Token<RMDelegationTokenIdentifier>) token;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: RMDelegationTokenSelector.java
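
A typical call site hands the selector the current user's token collection. A minimal usage sketch, assuming a YARN client context (the service address is illustrative):

import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector;

public class SelectorUsage {
  public static Token<RMDelegationTokenIdentifier> findRmToken() throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // The service Text must match the address the token was issued for.
    Text service = new Text("rm.example.com:8032");
    return new RMDelegationTokenSelector().selectToken(service, ugi.getTokens());
  }
}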

Example 9: fetchLine

import org.apache.hadoop.io.Text; // import the required package/class
/**
 * Fetches next line from file.
 * 
 * The line is stored in {@link #line} for further processing. The fields {@link #pos} and {@link #lastReadPos} are
 * updated accordingly.
 * 
 * If the end of the file has been reached, {@link #line} is set to {@code null} and the flag {@link #eof} is
 * set to {@code true}.
 * 
 * @throws IOException
 */
private void fetchLine() throws IOException {

    // Stop at the end of the current split when the file is splittable.
    if (isSplittable && posInFile() >= end) {
        eos = true;
    }
    Text text = new Text();
    int length = lineReader.readLine(text);
    if (length == 0) {
        eof = true;
        line = null;
        return;
    }
    lastReadPos = pos;
    pos += length;
    line = text.toString();
}
 
Developer: comdirect, Project: hadoop-logfile-inputformat, Lines: 28, Source: LogfileRecordReader.java
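
The reader above relies on the LineReader idiom of filling a caller-supplied Text buffer; readLine returns the number of bytes consumed (including the line terminator), and 0 signals end of stream. A standalone sketch of that idiom (the input data is illustrative):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

public class LineReaderDemo {
  public static void main(String[] args) throws IOException {
    byte[] data = "first\nsecond\n".getBytes("UTF-8");
    LineReader reader = new LineReader(new ByteArrayInputStream(data));
    // One Text buffer is reused for every line, avoiding per-line allocation.
    Text line = new Text();
    int bytesRead;
    while ((bytesRead = reader.readLine(line)) > 0) {
      System.out.println(line + " (" + bytesRead + " bytes)");
    }
    reader.close();
  }
}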

Example 10: renewDelegationToken

import org.apache.hadoop.io.Text; // import the required package/class
@Override
public RenewDelegationTokenResponse renewDelegationToken(
    RenewDelegationTokenRequest request) throws IOException {
    if (!isAllowedDelegationTokenOp()) {
      throw new IOException(
          "Delegation Token can be renewed only with kerberos authentication");
    }

    org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
    Token<MRDelegationTokenIdentifier> token =
        new Token<MRDelegationTokenIdentifier>(
            protoToken.getIdentifier().array(), protoToken.getPassword()
                .array(), new Text(protoToken.getKind()), new Text(
                protoToken.getService()));

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    long nextExpTime = jhsDTSecretManager.renewToken(token, user);
    RenewDelegationTokenResponse renewResponse = Records
        .newRecord(RenewDelegationTokenResponse.class);
    renewResponse.setNextExpirationTime(nextExpTime);
    return renewResponse;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: HistoryClientService.java

Example 11: preserveFileAttributesForDirectories

import org.apache.hadoop.io.Text; // import the required package/class
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
  String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  final boolean syncOrOverwrite = syncFolder || overwrite;

  LOG.info("About to preserve attributes: " + attrSymbols);

  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
  final boolean preserveRawXattrs =
      conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);

  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                                    SequenceFile.Reader.file(sourceListing));
  long totalLen = clusterFS.getFileStatus(sourceListing).getLen();

  Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  long preservedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();

    // Iterate over every source path that was copied.
    while (sourceReader.next(srcRelPath, srcFileStatus)) {
      // File-attributes for files are set at the time of copy,
      // in the map-task.
      if (! srcFileStatus.isDirectory()) continue;

      Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
      //
      // Skip the root folder when syncOrOverwrite is true.
      //
      if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;

      FileSystem targetFS = targetFile.getFileSystem(conf);
      DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
          preserveRawXattrs);
      // Count each preserved directory entry; without this the summary log
      // below would always report 0.
      preservedEntries++;

      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Preserving status on directory entries. [" +
          sourceReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
  }
  LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: CopyCommitter.java
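
The detail worth noting above is that a single Text instance (srcRelPath) is reused across every sourceReader.next() call. A condensed, standalone sketch of that SequenceFile idiom (the file path and value type are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileScan {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]);  // a Text/IntWritable sequence file
    SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(file));
    try {
      // next() overwrites these buffers in place on each iteration,
      // so copy the contents if they must outlive the loop.
      Text key = new Text();
      IntWritable value = new IntWritable();
      while (reader.next(key, value)) {
        System.out.println(key + "\t" + value.get());
      }
    } finally {
      IOUtils.closeStream(reader);
    }
  }
}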

Example 12: main

import org.apache.hadoop.io.Text; // import the required package/class
public static void main(String[] args) throws IOException {

        if (args.length < 3) {
            System.err.println("Usage: RepartitionFastq <input path> <output path> <number of partitions>");
            System.exit(1);
        }

        SparkConf conf = new SparkConf().setAppName("RepartitionFastq");
        //conf.set("spark.default.parallelism", String.valueOf(args[2]));
        JavaSparkContext sc = new JavaSparkContext(conf);

        JavaPairRDD<Text, SequencedFragment> fastqRDD = sc.newAPIHadoopFile(args[0], FastqInputFormat.class, Text.class, SequencedFragment.class, sc.hadoopConfiguration());

        JavaPairRDD<Text, SequencedFragment> repartitioned = fastqRDD.repartition(Integer.valueOf(args[2]));

        repartitioned.saveAsNewAPIHadoopFile(args[1], Text.class, SequencedFragment.class, FastqOutputFormat.class, sc.hadoopConfiguration());

        sc.stop();
}
 
Developer: NGSeq, Project: ViraPipe, Lines: 20, Source: RepartitionFastq.java
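
One caveat with this pattern: Hadoop record readers reuse a single Text instance across records, so caching or collecting such an RDD without copying can surface duplicated keys. A hedged sketch of the usual defensive copy, written as a helper method for the driver class above (the helper name is ours; the value type may need the same treatment):

import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.JavaPairRDD;
import scala.Tuple2;

// Copy the reused Text keys before any cache()/collect() that materializes pairs.
static <V> JavaPairRDD<Text, V> copyKeys(JavaPairRDD<Text, V> rdd) {
  return rdd.mapToPair(pair -> new Tuple2<>(new Text(pair._1()), pair._2()));
}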

Example 13: initTokenManager

import org.apache.hadoop.io.Text; // import the required package/class
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
  Configuration conf = new Configuration(false);
  for (Map.Entry entry : config.entrySet()) {
    conf.set((String) entry.getKey(), (String) entry.getValue());
  }
  String tokenKind = conf.get(TOKEN_KIND);
  if (tokenKind == null) {
    throw new IllegalArgumentException(
        "The configuration does not define the token kind");
  }
  tokenKind = tokenKind.trim();
  tokenManager = new DelegationTokenManager(conf, new Text(tokenKind));
  tokenManager.init();
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DelegationTokenAuthenticationHandler.java

Example 14: readFields

import org.apache.hadoop.io.Text; // import the required package/class
/**
 * Read (i.e., deserialize) an employee. Fields are read in the order written.
 */
@Override
public void readFields(DataInput in) throws IOException {
	name = new Text();
	name.readFields(in);
	address = new Text();
	address.readFields(in);
	company = new Text();
	company.readFields(in);
	salary = new DoubleWritable();
	salary.readFields(in);
	department = new Text();
	department.readFields(in);
	isManager = new BooleanWritable();
	isManager.readFields(in);
}
 
Developer: amritbhat786, Project: DocIT, Lines: 19, Source: Employee.java
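
The Writable contract requires a write method that emits fields in exactly the order readFields consumes them. A sketch of the matching counterpart for this Employee, using the field names above (not necessarily the project's actual code):

import java.io.DataOutput;
import java.io.IOException;

/**
 * Write (i.e., serialize) an employee; the field order must mirror readFields.
 */
@Override
public void write(DataOutput out) throws IOException {
	name.write(out);
	address.write(out);
	company.write(out);
	salary.write(out);
	department.write(out);
	isManager.write(out);
}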

Example 15: testDelegationTokenNullRenewer

import org.apache.hadoop.io.Text; // import the required package/class
@Test 
public void testDelegationTokenNullRenewer() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager = 
    new TestDelegationTokenSecretManager(24*60*60*1000,
      10*1000,1*1000,3600000);
  dtSecretManager.startThreads();
  TestDelegationTokenIdentifier dtId = new TestDelegationTokenIdentifier(new Text(
      "theuser"), null, null);
  Token<TestDelegationTokenIdentifier> token = new Token<TestDelegationTokenIdentifier>(
      dtId, dtSecretManager);
  Assert.assertNotNull(token);
  try {
    dtSecretManager.renewToken(token, "");
    Assert.fail("Renewal must not succeed");
  } catch (IOException e) {
    //PASS
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: TestDelegationToken.java


Note: the org.apache.hadoop.io.Text examples in this article were compiled from open-source code hosted on GitHub and similar platforms; copyright of each snippet remains with its original author. Consult the corresponding project's license before reusing the code, and do not republish without permission.