當前位置: 首頁>>代碼示例>>Java>>正文


Java ArrayWritable類代碼示例

本文整理匯總了Java中org.apache.hadoop.io.ArrayWritable的典型用法代碼示例。如果您正苦於以下問題:Java ArrayWritable類的具體用法?Java ArrayWritable怎麽用?Java ArrayWritable使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。


ArrayWritable類屬於org.apache.hadoop.io包,在下文中一共展示了ArrayWritable類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: writeFields

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
// Serializes this edit-log op's fields to the output stream.
// The statement order below IS the wire format: readers must consume the
// fields in exactly this sequence, so do not reorder these calls.
@Override
public void writeFields(DataOutputStream out) throws IOException {
  FSImageSerialization.writeLong(inodeId, out);
  FSImageSerialization.writeString(path, out);
  FSImageSerialization.writeShort(replication, out);
  FSImageSerialization.writeLong(mtime, out);
  FSImageSerialization.writeLong(atime, out);
  FSImageSerialization.writeLong(blockSize, out);
  // Block list is framed via ArrayWritable (element count + elements).
  new ArrayWritable(Block.class, blocks).write(out);
  permissions.write(out);

  // The trailing fields are written only for OP_ADD records; other op codes
  // sharing this writer stop after the permissions.
  if (this.opCode == OP_ADD) {
    AclEditLogUtil.write(aclEntries, out);
    // XAttrs go out as a length-delimited protobuf message.
    XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
    b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
    b.build().writeDelimitedTo(out);
    FSImageSerialization.writeString(clientName,out);
    FSImageSerialization.writeString(clientMachine,out);
    FSImageSerialization.writeBoolean(overwrite, out);
    FSImageSerialization.writeByte(storagePolicyId, out);
    // write clientId and callId
    writeRpcIds(rpcClientId, rpcCallId, out);
  }
}
 
開發者ID:naver,項目名稱:hadoop,代碼行數:25,代碼來源:FSEditLogOp.java

示例2: writeFields

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
// Serializes this edit-log op's fields to the output stream.
// The statement order below IS the wire format: readers must consume the
// fields in exactly this sequence, so do not reorder these calls.
@Override
public void writeFields(DataOutputStream out) throws IOException {
  FSImageSerialization.writeLong(inodeId, out);
  FSImageSerialization.writeString(path, out);
  FSImageSerialization.writeShort(replication, out);
  FSImageSerialization.writeLong(mtime, out);
  FSImageSerialization.writeLong(atime, out);
  FSImageSerialization.writeLong(blockSize, out);
  // Block list is framed via ArrayWritable (element count + elements).
  new ArrayWritable(Block.class, blocks).write(out);
  permissions.write(out);

  // The trailing fields are written only for OP_ADD records; other op codes
  // sharing this writer stop after the permissions.
  if (this.opCode == OP_ADD) {
    AclEditLogUtil.write(aclEntries, out);
    // XAttrs go out as a length-delimited protobuf message.
    XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
    b.addAllXAttrs(PBHelperClient.convertXAttrProto(xAttrs));
    b.build().writeDelimitedTo(out);
    FSImageSerialization.writeString(clientName,out);
    FSImageSerialization.writeString(clientMachine,out);
    FSImageSerialization.writeBoolean(overwrite, out);
    FSImageSerialization.writeByte(storagePolicyId, out);
    // write clientId and callId
    writeRpcIds(rpcClientId, rpcCallId, out);
  }
}
 
開發者ID:aliyun-beta,項目名稱:aliyun-oss-hadoop-fs,代碼行數:25,代碼來源:FSEditLogOp.java

示例3: toString

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Override
public String toString() {
    // Render like AbstractMap#toString ("{k=v, k=v}") but expand any
    // ArrayWritable value into its element list instead of relying on its
    // default toString().
    Iterator<Entry<Writable, Writable>> it = entrySet().iterator();
    if (!it.hasNext()) {
        return "{}";
    }

    StringBuilder buf = new StringBuilder("{");
    while (true) {
        Entry<Writable, Writable> entry = it.next();
        Writable k = entry.getKey();
        Writable v = entry.getValue();
        // Guard against self-referential maps, mirroring AbstractMap.
        buf.append(k == this ? "(this Map)" : k);
        buf.append('=');
        if (v instanceof ArrayWritable) {
            buf.append(Arrays.toString(((ArrayWritable) v).get()));
        } else {
            buf.append(v == this ? "(this Map)" : v);
        }
        if (!it.hasNext()) {
            return buf.append('}').toString();
        }
        buf.append(", ");
    }
}
 
開發者ID:xushjie1987,項目名稱:es-hadoop-v2.2.0,代碼行數:26,代碼來源:LinkedMapWritable.java

示例4: readExcelInputFormatExcel2003Empty

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2003Empty() throws IOException {
    // Point the job at the bundled empty Excel 2003 (.xls) fixture.
    JobConf jobConf = new JobConf(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2003empty.xls";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    FileInputFormat.setInputPaths(jobConf, inputFile);
    // set locale to the one of the test data
    jobConf.set("hadoopoffice.locale.bcp47", "de");
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    inputFormat.configure(jobConf);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1);
    assertEquals(1, splits.length, "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.getRecordReader(splits[0], jobConf, reporter);
    assertNotNull(recordReader, "Format returned  null RecordReader");
    Text key = new Text();
    ArrayWritable value = new ArrayWritable(SpreadSheetCellDAO.class);
    // An empty workbook still yields exactly one (empty) row before EOF.
    assertTrue(recordReader.next(key, value), "Input Split for Excel file contains row 1");
    assertEquals(0, value.get().length, "Input Split for Excel file contain row 1 and is empty");
    assertFalse(recordReader.next(key, value), "Input Split for Excel file contains no further row");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:23,代碼來源:OfficeFormatHadoopExcelTest.java

示例5: readExcelInputFormatExcel2013Empty

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2013Empty() throws IOException {
    // Point the job at the bundled empty Excel 2013 (.xlsx) fixture.
    JobConf jobConf = new JobConf(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2013empty.xlsx";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    FileInputFormat.setInputPaths(jobConf, inputFile);
    // set locale to the one of the test data
    jobConf.set("hadoopoffice.read.locale.bcp47", "de");
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    inputFormat.configure(jobConf);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1);
    assertEquals(1, splits.length, "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.getRecordReader(splits[0], jobConf, reporter);
    assertNotNull(recordReader, "Format returned  null RecordReader");
    Text key = new Text();
    ArrayWritable value = new ArrayWritable(SpreadSheetCellDAO.class);
    // An empty workbook still yields exactly one (empty) row before EOF.
    assertTrue(recordReader.next(key, value), "Input Split for Excel file contains row 1");
    assertEquals(0, value.get().length, "Input Split for Excel file contain row 1 and is empty");
    assertFalse(recordReader.next(key, value), "Input Split for Excel file contains no further row");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:23,代碼來源:OfficeFormatHadoopExcelTest.java

示例6: readExcelInputFormatExcel2013SingleSheetEncryptedNegative

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2013SingleSheetEncryptedNegative() throws IOException {
    // Encrypted .xlsx opened with a wrong password: the old mapred API
    // signals failure by returning a null record reader.
    JobConf jobConf = new JobConf(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2013encrypt.xlsx";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    FileInputFormat.setInputPaths(jobConf, inputFile);
    // set locale to the one of the test data
    jobConf.set("hadoopoffice.read.locale.bcp47", "de");
    // for decryption simply set the password
    jobConf.set("hadoopoffice.read.security.crypt.password", "test2");
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    inputFormat.configure(jobConf);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1);
    assertEquals(1, splits.length, "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.getRecordReader(splits[0], jobConf, reporter);
    assertNull(recordReader, "Null record reader implies invalid password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:20,代碼來源:OfficeFormatHadoopExcelTest.java

示例7: readExcelInputFormatExcel2013SingleSheetEncryptedNegativeLowFootprint

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2013SingleSheetEncryptedNegativeLowFootprint() throws IOException {
    // Same wrong-password scenario as the non-low-footprint variant, but with
    // the streaming (low-footprint) parser enabled.
    JobConf jobConf = new JobConf(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2013encrypt.xlsx";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    FileInputFormat.setInputPaths(jobConf, inputFile);
    // set locale to the one of the test data
    jobConf.set("hadoopoffice.read.locale.bcp47", "de");
    // low footprint
    jobConf.set("hadoopoffice.read.lowFootprint", "true");
    // for decryption simply set the password
    jobConf.set("hadoopoffice.read.security.crypt.password", "test2");
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    inputFormat.configure(jobConf);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1);
    assertEquals(1, splits.length, "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.getRecordReader(splits[0], jobConf, reporter);
    assertNull(recordReader, "Null record reader implies invalid password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:22,代碼來源:OfficeFormatHadoopExcelTest.java

示例8: readExcelInputFormatExcel2003SingleSheetEncryptedNegativeLowFootprint

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2003SingleSheetEncryptedNegativeLowFootprint() throws IOException {
    // Encrypted .xls (binary format) opened with a wrong password in
    // low-footprint mode: expect a null record reader.
    JobConf jobConf = new JobConf(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2003encrypt.xls";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    FileInputFormat.setInputPaths(jobConf, inputFile);
    // set locale to the one of the test data
    jobConf.set("hadoopoffice.read.locale.bcp47", "de");
    // low footprint
    jobConf.set("hadoopoffice.read.lowFootprint", "true");
    // for decryption simply set the password
    jobConf.set("hadoopoffice.read.security.crypt.password", "test2");
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    inputFormat.configure(jobConf);
    InputSplit[] splits = inputFormat.getSplits(jobConf, 1);
    assertEquals(1, splits.length, "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.getRecordReader(splits[0], jobConf, reporter);
    assertNull(recordReader, "Null record reader implies invalid password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:23,代碼來源:OfficeFormatHadoopExcelTest.java

示例9: readExcelInputFormatExcel2013Empty

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2013Empty() throws IOException, InterruptedException {
    // mapreduce-API variant: empty .xlsx must yield one empty row, then EOF.
    Configuration conf = new Configuration(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2013empty.xlsx";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    // set locale to the one of the test data
    conf.set("hadoopoffice.locale.bcp47", "de");
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, inputFile);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    List<InputSplit> splits = inputFormat.getSplits(job);
    assertEquals(1, splits.size(), "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.createRecordReader(splits.get(0), context);
    assertNotNull(recordReader, "Format returned  null RecordReader");
    recordReader.initialize(splits.get(0), context);
    assertTrue(recordReader.nextKeyValue(), "Input Split for Excel file contains row 1");
    Text key = recordReader.getCurrentKey();
    ArrayWritable value = recordReader.getCurrentValue();
    assertEquals(0, value.get().length, "Input Split for Excel file contain row 1 and is empty");
    assertFalse(recordReader.nextKeyValue(), "Input Split for Excel file contains no further row");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:27,代碼來源:OfficeFormatHadoopExcelTest.java

示例10: readExcelInputFormatExcel2013SingleSheetEncryptedNegative

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2013SingleSheetEncryptedNegative() throws IOException, InterruptedException {
    // Opens an encrypted Excel 2013 file with a deliberately wrong password;
    // the mapreduce-API reader must fail in initialize() rather than return data.
    Configuration conf = new Configuration(defaultConf);
    ClassLoader classLoader = getClass().getClassLoader();
    String fileName = "excel2013encrypt.xlsx";
    Path file = new Path(classLoader.getResource(fileName).getFile());
    // set locale to the one of the test data
    conf.set("hadoopoffice.read.locale.bcp47", "de");
    // for decryption simply set the password
    conf.set("hadoopoffice.read.security.crypt.password", "test2");
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, file);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    ExcelFileInputFormat format = new ExcelFileInputFormat();
    List<InputSplit> splits = format.getSplits(job);
    assertEquals(1, splits.size(), "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> reader = format.createRecordReader(splits.get(0), context);
    // Fix: the assertThrows result was assigned to an unused local; drop it.
    assertThrows(InterruptedException.class, () -> reader.initialize(splits.get(0), context),
        "Exception is thrown in case of wrong password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:22,代碼來源:OfficeFormatHadoopExcelTest.java

示例11: readExcelInputFormatExcel2003SingleSheetEncryptedNegative

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2003SingleSheetEncryptedNegative() throws IOException, InterruptedException {
    // Opens an encrypted Excel 2003 file with a deliberately wrong password;
    // the mapreduce-API reader must fail in initialize() rather than return data.
    Configuration conf = new Configuration(defaultConf);
    ClassLoader classLoader = getClass().getClassLoader();
    String fileName = "excel2003encrypt.xls";
    Path file = new Path(classLoader.getResource(fileName).getFile());
    // set locale to the one of the test data
    conf.set("hadoopoffice.read.locale.bcp47", "de");
    // for decryption simply set the password
    conf.set("hadoopoffice.read.security.crypt.password", "test2");
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, file);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    ExcelFileInputFormat format = new ExcelFileInputFormat();
    List<InputSplit> splits = format.getSplits(job);
    assertEquals(1, splits.size(), "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> reader = format.createRecordReader(splits.get(0), context);
    // Fix: the assertThrows result was assigned to an unused local; drop it.
    assertThrows(InterruptedException.class, () -> reader.initialize(splits.get(0), context),
        "Exception is thrown in case of wrong password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:21,代碼來源:OfficeFormatHadoopExcelTest.java

示例12: getSelectorByQueryType

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
/**
 * Pulls the correct selector from the MapWritable data element given the queryType
 * <p>
 * Pulls first element of array if element is an array type
 *
 * @param dataMap data record to extract the selector from
 * @param qSchema query schema naming the selector field
 * @param dSchema data schema used to resolve the field's text name and array-ness
 * @return the selector value; for array elements, the first entry of the array
 */
public static String getSelectorByQueryType(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema)
{
  String selector;

  String fieldName = qSchema.getSelectorName();
  if (dSchema.isArrayElement(fieldName))
  {
    // Fix: look the element up once instead of performing the same map
    // lookup (and text-name resolution) up to three times.
    Object dataElement = dataMap.get(dSchema.getTextName(fieldName));
    if (dataElement instanceof WritableArrayWritable)
    {
      String[] selectorArray = ((WritableArrayWritable) dataElement).toStrings();
      selector = selectorArray[0];
    }
    else
    {
      String[] elementArray = ((ArrayWritable) dataElement).toStrings();
      selector = elementArray[0];
    }
  }
  else
  {
    selector = dataMap.get(dSchema.getTextName(fieldName)).toString();
  }

  return selector;
}
 
開發者ID:apache,項目名稱:incubator-pirk,代碼行數:31,代碼來源:QueryUtils.java

示例13: readExcelInputFormatExcel2003SingleSheetEncryptedPositiveLowFootprint

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2003SingleSheetEncryptedPositiveLowFootprint() throws IOException, InterruptedException {
    // Encrypted .xls opened with the CORRECT password in low-footprint mode:
    // the first row and its three cells must decode as expected.
    Configuration conf = new Configuration(defaultConf);
    ClassLoader loader = getClass().getClassLoader();
    String resourceName = "excel2003encrypt.xls";
    Path inputFile = new Path(loader.getResource(resourceName).getFile());
    // set locale to the one of the test data
    conf.set("hadoopoffice.read.locale.bcp47", "de");
    // low footprint
    conf.set("hadoopoffice.read.lowFootprint", "true");
    // for decryption simply set the password
    conf.set("hadoopoffice.read.security.crypt.password", "test");
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, inputFile);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    ExcelFileInputFormat inputFormat = new ExcelFileInputFormat();
    List<InputSplit> splits = inputFormat.getSplits(job);
    assertEquals(1, splits.size(), "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> recordReader = inputFormat.createRecordReader(splits.get(0), context);
    assertNotNull(recordReader, "Format returned  null RecordReader");
    recordReader.initialize(splits.get(0), context);
    assertTrue(recordReader.nextKeyValue(), "Input Split for Excel file contains row 1");
    Text key = recordReader.getCurrentKey();
    ArrayWritable value = recordReader.getCurrentValue();
    assertEquals("[excel2003encrypt.xls]Sheet1!A1", key.toString(), "Input Split for Excel file has keyname == \"[excel2003encrypt.xls]Sheet1!A1\"");
    assertEquals(3, value.get().length, "Input Split for Excel file contains row 1 with 3 columns");
    SpreadSheetCellDAO firstCell = (SpreadSheetCellDAO) value.get()[0];
    assertEquals("test1", firstCell.getFormattedValue(), "Input Split for Excel file contains row 1 with cell 1 == \"test1\"");
    assertEquals("Sheet1", firstCell.getSheetName(), "Input Split for Excel file contains row 1 with cell 1 sheetname == \"Sheet1\"");
    assertEquals("A1", firstCell.getAddress(), "Input Split for Excel file contains row 1 with cell 1 address == \"A1\"");
    assertEquals("test2", ((SpreadSheetCellDAO) value.get()[1]).getFormattedValue(), "Input Split for Excel file contains row 1 with cell 2 == \"test2\"");
    assertEquals("test3", ((SpreadSheetCellDAO) value.get()[2]).getFormattedValue(), "Input Split for Excel file contains row 1 with cell 3 == \"test3\"");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:38,代碼來源:OfficeFormatHadoopExcelTest.java

示例14: readExcelInputFormatExcel2003SingleSheetEncryptedNegativeLowFootprint

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2003SingleSheetEncryptedNegativeLowFootprint() throws IOException, InterruptedException {
    // Encrypted Excel 2003 file, wrong password, low-footprint mode:
    // the mapreduce-API reader must fail in initialize().
    Configuration conf = new Configuration(defaultConf);
    ClassLoader classLoader = getClass().getClassLoader();
    String fileName = "excel2003encrypt.xls";
    Path file = new Path(classLoader.getResource(fileName).getFile());
    // set locale to the one of the test data
    conf.set("hadoopoffice.read.locale.bcp47", "de");
    // low footprint
    conf.set("hadoopoffice.read.lowFootprint", "true");
    // for decryption simply set the password
    conf.set("hadoopoffice.read.security.crypt.password", "test2");
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, file);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    ExcelFileInputFormat format = new ExcelFileInputFormat();
    List<InputSplit> splits = format.getSplits(job);
    assertEquals(1, splits.size(), "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> reader = format.createRecordReader(splits.get(0), context);
    // Fix: the assertThrows result was assigned to an unused local; drop it.
    assertThrows(InterruptedException.class, () -> reader.initialize(splits.get(0), context),
        "Exception is thrown in case of wrong password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:25,代碼來源:OfficeFormatHadoopExcelTest.java

示例15: readExcelInputFormatExcel2013SingleSheetEncryptedNegativeLowFootprint

import org.apache.hadoop.io.ArrayWritable; //導入依賴的package包/類
@Test
public void readExcelInputFormatExcel2013SingleSheetEncryptedNegativeLowFootprint() throws IOException, InterruptedException {
    // Encrypted Excel 2013 file, wrong password, low-footprint mode:
    // the mapreduce-API reader must fail in initialize().
    Configuration conf = new Configuration(defaultConf);
    ClassLoader classLoader = getClass().getClassLoader();
    String fileName = "excel2013encrypt.xlsx";
    Path file = new Path(classLoader.getResource(fileName).getFile());
    // set locale to the one of the test data
    conf.set("hadoopoffice.read.locale.bcp47", "de");
    // low footprint
    conf.set("hadoopoffice.read.lowFootprint", "true");
    // for decryption simply set the password
    conf.set("hadoopoffice.read.security.crypt.password", "test2");
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, file);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    ExcelFileInputFormat format = new ExcelFileInputFormat();
    List<InputSplit> splits = format.getSplits(job);
    assertEquals(1, splits.size(), "Only one split generated for Excel file");
    RecordReader<Text, ArrayWritable> reader = format.createRecordReader(splits.get(0), context);
    // Fix: the assertThrows result was assigned to an unused local; drop it.
    assertThrows(InterruptedException.class, () -> reader.initialize(splits.get(0), context),
        "Exception is thrown in case of wrong password");
}
 
開發者ID:ZuInnoTe,項目名稱:hadoopoffice,代碼行數:24,代碼來源:OfficeFormatHadoopExcelTest.java


注:本文中的org.apache.hadoop.io.ArrayWritable類示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。