

Java Text Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.Text. If you are wondering what the Text class does, how to use it, or what real-world usage looks like, the curated examples below may help.


The Text class belongs to the org.apache.hadoop.io package. Fifteen Text code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
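Before the compiled examples, a minimal standalone sketch of the Text API itself may be useful: Text is a mutable Writable that stores its string as UTF-8 bytes, so a single instance can be reused across records and round-tripped through the same write/readFields serialization the examples below rely on. The Hadoop classes used here are standard; the TextBasics wrapper class is illustrative only.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class TextBasics {
  public static void main(String[] args) throws IOException {
    // Text is mutable: set() replaces the contents in place, so a single
    // instance can be reused across records (as the map examples below do).
    Text text = new Text("hadoop");
    text.set("hadoop writable");
    System.out.println(text.getLength());  // length in UTF-8 bytes, not chars
    System.out.println(text.find("writ")); // byte offset of a substring, or -1

    // Round-trip through the same Writable serialization the examples use.
    DataOutputBuffer out = new DataOutputBuffer();
    text.write(out);

    Text copy = new Text();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    copy.readFields(in);
    System.out.println(copy); // prints "hadoop writable"
  }
}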

Example 1: map

import org.apache.hadoop.io.Text; // import the required package/class
public void map(Object key, Text value, Context context)
        throws IOException, InterruptedException {
  // Make one mapper slower for speculative execution
  TaskAttemptID taid = context.getTaskAttemptID();
  long sleepTime = 100;
  Configuration conf = context.getConfiguration();
  boolean test_speculate_map =
          conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false);

  // IF TESTING MAPPER SPECULATIVE EXECUTION:
  //   Make the "*_m_000000_0" attempt take much longer than the others.
  //   When speculative execution is enabled, this should cause the attempt
  //   to be killed and restarted. At that point, the attempt ID will be
  //   "*_m_000000_1", so sleepTime will still remain 100ms.
  if ( (taid.getTaskType() == TaskType.MAP) && test_speculate_map
        && (taid.getTaskID().getId() == 0) && (taid.getId() == 0)) {
    sleepTime = 10000;
  }
  try {
    Thread.sleep(sleepTime);
  } catch (InterruptedException ie) {
    // Ignore
  }
  context.write(value, new IntWritable(1));
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestSpeculativeExecution.java

Example 2: testAddCreds

import org.apache.hadoop.io.Text; // import the required package/class
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddCreds() throws Exception {
  UserGroupInformation ugi = 
      UserGroupInformation.createRemoteUser("someone"); 
  
  Text service = new Text("service");
  Token<T> t1 = mock(Token.class);
  when(t1.getService()).thenReturn(service);
  Token<T> t2 = mock(Token.class);
  when(t2.getService()).thenReturn(new Text("service2"));
  byte[] secret = new byte[]{};
  Text secretKey = new Text("sshhh");

  // fill credentials
  Credentials creds = new Credentials();
  creds.addToken(t1.getService(), t1);
  creds.addToken(t2.getService(), t2);
  creds.addSecretKey(secretKey, secret);
  
  // add creds to ugi, and check ugi
  ugi.addCredentials(creds);
  checkTokens(ugi, t1, t2);
  assertSame(secret, ugi.getCredentials().getSecretKey(secretKey));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 26, Source: TestUserGroupInformation.java

Example 3: map

import org.apache.hadoop.io.Text; // import the required package/class
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String doc = value.toString();
				
	String text = slice(doc, "<text", "</text>", true);
	if (text.length() < 1) return;
	
	// Normalize: lower-case the text and replace every non-letter with a space.
	char[] txt = text.toLowerCase().toCharArray();
	for (int i = 0; i < txt.length; ++i) {
		if (!((txt[i] >= 'a' && txt[i] <= 'z') || (txt[i] >= 'A' && txt[i] <= 'Z')))
			txt[i] = ' ';
	}
	
	String id = slice(doc, "<id>", "</id>", false);
	if (id.length() < 1) return;
	StringTokenizer itr = new StringTokenizer(String.valueOf(txt));
	int sum = itr.countTokens(); // total tokens in this document (TF denominator)
	while (itr.hasMoreTokens()) {
		String s = itr.nextToken();
		word.set(id + '-' + s);
		IntWritable[] tmp = {new IntWritable(sum), new IntWritable(1)};
		IntArrayWritable temp = new IntArrayWritable(tmp);
		context.write(word, temp);
	}
}
 
Developer: lzmhhh123, Project: Wikipedia-Index, Lines: 26, Source: TF.java

Example 4: write

import org.apache.hadoop.io.Text; // import the required package/class
@Override
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {

  output.writeInt(splitId);

  if (this.oracleDataChunks == null) {
    output.writeInt(0);
  } else {
    output.writeInt(this.oracleDataChunks.size());
    for (OraOopOracleDataChunk dataChunk : this.oracleDataChunks) {
      Text.writeString(output, dataChunk.getClass().getName());
      dataChunk.write(output);
    }
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 17, Source: OraOopDBInputSplit.java
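The write() above serializes the split ID, a chunk count, and then each chunk's class name (via Text.writeString) followed by the chunk itself. Its Writable counterpart has to read the fields back in exactly that order. Below is a hedged sketch of what that readFields() could look like; the project's actual OraOopDBInputSplit.readFields() may differ, and the reflective construction of chunks (plus the java.io.DataInput and java.util.ArrayList imports it assumes) is an assumption, not taken from the source.

// Hedged sketch of the matching deserialization -- not the project's
// actual readFields(); it simply mirrors the write() above field by field.
@Override
public void readFields(DataInput input) throws IOException {
  this.splitId = input.readInt();               // mirrors writeInt(splitId)
  int chunkCount = input.readInt();
  this.oracleDataChunks = new ArrayList<>(chunkCount);
  for (int idx = 0; idx < chunkCount; idx++) {
    String className = Text.readString(input);  // mirrors Text.writeString(...)
    try {
      // Assumption: chunks have a no-arg constructor and are Writable.
      OraOopOracleDataChunk chunk = (OraOopOracleDataChunk)
          Class.forName(className).getDeclaredConstructor().newInstance();
      chunk.readFields(input);
      this.oracleDataChunks.add(chunk);
    } catch (ReflectiveOperationException e) {
      throw new IOException("Cannot recreate data chunk " + className, e);
    }
  }
}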

Example 5: reduce

import org.apache.hadoop.io.Text; // import the required package/class
/** Combines values for a given key.  
 * @param key the key is expected to be a Text object, whose prefix indicates
 * the type of aggregation to perform on the values. 
 * @param values the values to combine
 * @param context to collect combined values
 */
public void reduce(Text key, Iterable<Text> values, Context context) 
    throws IOException, InterruptedException {
  String keyStr = key.toString();
  int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
  String type = keyStr.substring(0, pos);
  long uniqCount = context.getConfiguration().
    getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
  ValueAggregator aggregator = ValueAggregatorBaseDescriptor
    .generateValueAggregator(type, uniqCount);
  for (Text val : values) {
    aggregator.addNextValue(val);
  }
  Iterator<?> outputs = aggregator.getCombinerOutput().iterator();

  while (outputs.hasNext()) {
    Object v = outputs.next();
    if (v instanceof Text) {
      context.write(key, (Text)v);
    } else {
      context.write(key, new Text(v.toString()));
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: ValueAggregatorCombiner.java

Example 6: fetchColumnTest

import org.apache.hadoop.io.Text; // import the required package/class
private void fetchColumnTest(String configuration) throws Exception {
  List<String> rows = Arrays.asList("row1", "row2");
  List<String> colFs = Arrays.asList("colF1", "colF2");
  List<String> colQs = Arrays.asList("colQ1", "colQ2");
  List<String> colVs = Collections.singletonList("");
  List<String> values = Collections.singletonList("value");

  clearTable();
  EncryptedBatchWriter writer = getEncryptedWriter(CHARLIE, configuration);
  writeData(writer, rows, colFs, colQs, colVs, values);
  writer.close();

  EncryptedBatchScanner scanner = getEncryptedScanner(CHARLIE, configuration);
  scanner.setRanges(Collections.singletonList(new Range()));
  scanner.fetchColumn(new IteratorSetting.Column(new Text("colF1"), new Text("colQ1")));
  assertThat("contains the filtered data", scanner, hasData(rows, Collections.singletonList("colF1"), Collections.singletonList("colQ1"), colVs, values));
}
 
Developer: mit-ll, Project: PACE, Lines: 18, Source: FilteringIT.java

Example 7: testFsWithDuplicateChildrenTokenExists

import org.apache.hadoop.io.Text; // import the required package/class
@Test
public void testFsWithDuplicateChildrenTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs1");
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);

  MockFileSystem fs = createFileSystemForServiceName(service);
  MockFileSystem multiFs =
      createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
  
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs, false);
  
  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: TestFileSystemTokens.java

Example 8: selectToken

import org.apache.hadoop.io.Text; // import the required package/class
@SuppressWarnings("unchecked")
public Token<RMDelegationTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  LOG.debug("Looking for a token with service " + service.toString());
  for (Token<? extends TokenIdentifier> token : tokens) {
    LOG.debug("Token kind is " + token.getKind().toString()
        + " and the token's service name is " + token.getService());
    if (RMDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
        && checkService(service, token)) {
      return (Token<RMDelegationTokenIdentifier>) token;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: RMDelegationTokenSelector.java

Example 9: fetchLine

import org.apache.hadoop.io.Text; // import the required package/class
/**
 * Fetches next line from file.
 * 
 * The line is stored in {@link #line} for further processing. The fields {@link #pos} and {@link #lastReadPos} are
 * updated accordingly.
 * 
 * If the end of the file has been reached, {@link #line} is set to {@code null} and the flag {@link #eof} is
 * set to {@code true}.
 * 
 * @throws IOException
 */
private void fetchLine() throws IOException {

    if (isSplittable && posInFile() >= end) {
        eos = true;
    }
    Text text = new Text();
    int length = lineReader.readLine(text);
    if (length == 0) {
        eof = true;
        line = null;
        return;
    }
    lastReadPos = pos;
    pos += length;
    line = text.toString();
}
 
Developer: comdirect, Project: hadoop-logfile-inputformat, Lines: 28, Source: LogfileRecordReader.java

Example 10: renewDelegationToken

import org.apache.hadoop.io.Text; // import the required package/class
@Override
public RenewDelegationTokenResponse renewDelegationToken(
    RenewDelegationTokenRequest request) throws IOException {
    if (!isAllowedDelegationTokenOp()) {
      throw new IOException(
          "Delegation Token can be renewed only with kerberos authentication");
    }

    org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
    Token<MRDelegationTokenIdentifier> token =
        new Token<MRDelegationTokenIdentifier>(
            protoToken.getIdentifier().array(), protoToken.getPassword()
                .array(), new Text(protoToken.getKind()), new Text(
                protoToken.getService()));

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    long nextExpTime = jhsDTSecretManager.renewToken(token, user);
    RenewDelegationTokenResponse renewResponse = Records
        .newRecord(RenewDelegationTokenResponse.class);
    renewResponse.setNextExpirationTime(nextExpTime);
    return renewResponse;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: HistoryClientService.java

Example 11: preserveFileAttributesForDirectories

import org.apache.hadoop.io.Text; // import the required package/class
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
  String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  final boolean syncOrOverwrite = syncFolder || overwrite;

  LOG.info("About to preserve attributes: " + attrSymbols);

  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
  final boolean preserveRawXattrs =
      conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);

  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                                    SequenceFile.Reader.file(sourceListing));
  long totalLen = clusterFS.getFileStatus(sourceListing).getLen();

  Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  long preservedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();

    // Iterate over every source path that was copied.
    while (sourceReader.next(srcRelPath, srcFileStatus)) {
      // File-attributes for files are set at the time of copy,
      // in the map-task.
      if (! srcFileStatus.isDirectory()) continue;

      Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
      //
      // Skip the root folder when syncOrOverwrite is true.
      //
      if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;

      FileSystem targetFS = targetFile.getFileSystem(conf);
      DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
          preserveRawXattrs);
      preservedEntries++; // count it, so the final log line reports a real total

      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Preserving status on directory entries. [" +
          sourceReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
  }
  LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: CopyCommitter.java

Example 12: main

import org.apache.hadoop.io.Text; // import the required package/class
public static void main(String[] args) throws IOException {

        if (args.length < 3) { // input path, output path, and partition count are all required
            System.err.println("Usage: RepartitionFastq <input path> <output path> <number of partitions>");
            System.exit(1);
        }

        SparkConf conf = new SparkConf().setAppName("RepartitionFastq");
        //conf.set("spark.default.parallelism", String.valueOf(args[2]));
        JavaSparkContext sc = new JavaSparkContext(conf);

        JavaPairRDD<Text, SequencedFragment> fastqRDD = sc.newAPIHadoopFile(args[0], FastqInputFormat.class, Text.class, SequencedFragment.class, sc.hadoopConfiguration());

        JavaPairRDD<Text, SequencedFragment> repartitioned = fastqRDD.repartition(Integer.valueOf(args[2]));

        repartitioned.saveAsNewAPIHadoopFile(args[1], Text.class, SequencedFragment.class, FastqOutputFormat.class, sc.hadoopConfiguration());

        sc.stop();
    }
 
Developer: NGSeq, Project: ViraPipe, Lines: 20, Source: RepartitionFastq.java

Example 13: initTokenManager

import org.apache.hadoop.io.Text; // import the required package/class
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
  Configuration conf = new Configuration(false);
  for (Map.Entry entry : config.entrySet()) {
    conf.set((String) entry.getKey(), (String) entry.getValue());
  }
  String tokenKind = conf.get(TOKEN_KIND);
  if (tokenKind == null) {
    throw new IllegalArgumentException(
        "The configuration does not define the token kind");
  }
  tokenKind = tokenKind.trim();
  tokenManager = new DelegationTokenManager(conf, new Text(tokenKind));
  tokenManager.init();
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DelegationTokenAuthenticationHandler.java

Example 14: readFields

import org.apache.hadoop.io.Text; // import the required package/class
/**
 * Read (say, deserialize) an employee
 */
@Override
public void readFields(DataInput in) throws IOException {
	name = new Text();
	name.readFields(in);
	address = new Text();
	address.readFields(in);
	company = new Text();
	company.readFields(in);
	salary = new DoubleWritable();
	salary.readFields(in);
	department = new Text();
	department.readFields(in);
	isManager = new BooleanWritable();
	isManager.readFields(in);
}
 
Developer: amritbhat786, Project: DocIT, Lines: 19, Source: Employee.java
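The Writable format carries no field tags, so the write() counterpart of readFields() above must emit the fields in exactly the order they are consumed. A hedged sketch of what Employee.write() presumably looks like follows; the field names are taken from the example above, but this is an assumption, not the project's actual code.

// Hedged sketch of the matching serialization; field order must mirror
// readFields() exactly, since the Writable format carries no field names.
@Override
public void write(DataOutput out) throws IOException {
  name.write(out);
  address.write(out);
  company.write(out);
  salary.write(out);
  department.write(out);
  isManager.write(out);
}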

Example 15: testDelegationTokenNullRenewer

import org.apache.hadoop.io.Text; // import the required package/class
@Test 
public void testDelegationTokenNullRenewer() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager = 
    new TestDelegationTokenSecretManager(24*60*60*1000,
      10*1000, 1*1000, 3600000);
  dtSecretManager.startThreads();
  TestDelegationTokenIdentifier dtId = new TestDelegationTokenIdentifier(new Text(
      "theuser"), null, null);
  Token<TestDelegationTokenIdentifier> token = new Token<TestDelegationTokenIdentifier>(
      dtId, dtSecretManager);
  Assert.assertNotNull(token);
  try {
    dtSecretManager.renewToken(token, "");
    Assert.fail("Renewal must not succeed");
  } catch (IOException e) {
    //PASS
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: TestDelegationToken.java


Note: The org.apache.hadoop.io.Text examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Consult each project's License before redistributing or using the code. Do not republish without permission.