This article compiles typical usage examples of the Java method org.apache.flink.core.fs.FileInputSplit.getPath. If you are looking for concrete answers to how FileInputSplit.getPath is used and what it is for, the curated examples below should help. You can also read further about the enclosing class, org.apache.flink.core.fs.FileInputSplit.
The following presents 5 code examples of the FileInputSplit.getPath method, sorted by popularity by default.
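Before the individual examples, here is a minimal, self-contained sketch (not taken from the examples below) of where FileInputSplit instances typically come from and what getPath() returns on them. The file path file:///tmp/input.txt and the class name GetPathSketch are placeholder assumptions for illustration.
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.Path;

public class GetPathSketch {
    public static void main(String[] args) throws Exception {
        // assumption: a small local text file exists at this placeholder path
        TextInputFormat format = new TextInputFormat(new Path("file:///tmp/input.txt"));
        format.configure(new Configuration());

        // ask the format to generate splits; every split keeps a reference to the file it covers
        FileInputSplit[] splits = format.createInputSplits(2);
        for (FileInputSplit split : splits) {
            // getPath() returns the org.apache.flink.core.fs.Path of the file backing this split
            System.out.println(split.getPath() + " [" + split.getStart() + "," + split.getLength() + "]");
        }
    }
}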
Example 1: open
import org.apache.flink.core.fs.FileInputSplit; // import the package/class the method depends on

/**
 * Opens an input stream to the file defined in the input format.
 * The stream is positioned at the beginning of the given split.
 * <p>
 * The stream is actually opened in an asynchronous thread to make sure any interruptions to the thread
 * working on the input format do not reach the file system.
 */
@Override
public void open(FileInputSplit fileSplit) throws IOException {
    this.currentSplit = fileSplit;
    this.splitStart = fileSplit.getStart();
    this.splitLength = fileSplit.getLength();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Opening input split " + fileSplit.getPath() + " [" + this.splitStart + "," + this.splitLength + "]");
    }

    // open the split in an asynchronous thread
    final InputSplitOpenThread isot = new InputSplitOpenThread(fileSplit, this.openTimeout);
    isot.start();

    try {
        this.stream = isot.waitForCompletion();
        this.stream = decorateInputStream(this.stream, fileSplit);
    }
    catch (Throwable t) {
        throw new IOException("Error opening the Input Split " + fileSplit.getPath() +
                " [" + splitStart + "," + splitLength + "]: " + t.getMessage(), t);
    }

    // seek the stream to the split start, unless the split begins at the start of the file
    if (this.splitStart != 0) {
        this.stream.seek(this.splitStart);
    }
}
Example 2: testReadOverSplitBoundariesUnaligned
import org.apache.flink.core.fs.FileInputSplit; // import the package/class the method depends on

/**
 * Tests that the records are read correctly when the split boundary is in the middle of a record.
 */
@Test
public void testReadOverSplitBoundariesUnaligned() throws IOException {
    final String myString = "value1\nvalue2\nvalue3";
    final FileInputSplit split = createTempFile(myString);

    FileInputSplit split1 = new FileInputSplit(0, split.getPath(), 0, split.getLength() / 2, split.getHostnames());
    FileInputSplit split2 = new FileInputSplit(1, split.getPath(), split1.getLength(), split.getLength(), split.getHostnames());

    final Configuration parameters = new Configuration();
    format.configure(parameters);

    format.open(split1);
    assertEquals("value1", format.nextRecord(null));
    assertEquals("value2", format.nextRecord(null));
    assertNull(format.nextRecord(null));
    assertTrue(format.reachedEnd());
    format.close();

    format.open(split2);
    assertEquals("value3", format.nextRecord(null));
    assertNull(format.nextRecord(null));
    assertTrue(format.reachedEnd());
    format.close();
}
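The tests above and below call a createTempFile helper that the snippets do not show. A plausible stand-in, sketched here under the assumption that the helper simply materializes the string as a temporary file, writes the contents out and returns a single split covering the whole file:
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.Path;

// Hypothetical helper, not part of the original snippets: writes the given contents to a
// temporary file and wraps the whole file in one FileInputSplit.
private FileInputSplit createTempFile(String contents) throws IOException {
    File tempFile = File.createTempFile("test_contents", ".tmp");
    tempFile.deleteOnExit();
    try (Writer out = new OutputStreamWriter(new FileOutputStream(tempFile), StandardCharsets.UTF_8)) {
        out.write(contents);
    }
    return new FileInputSplit(0, new Path(tempFile.toURI().toString()), 0, tempFile.length(), new String[] {"localhost"});
}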
Example 3: testReadRecordsLargerThanBuffer
import org.apache.flink.core.fs.FileInputSplit; // import the package/class the method depends on

@Test
public void testReadRecordsLargerThanBuffer() throws IOException {
    final String myString = "aaaaaaaaaaaaaaaaaaaaa\n" +
            "bbbbbbbbbbbbbbbbbbbbbbbbb\n" +
            "ccccccccccccccccccc\n" +
            "ddddddddddddddddddddddddddddddddddd\n";
    final FileInputSplit split = createTempFile(myString);

    FileInputSplit split1 = new FileInputSplit(0, split.getPath(), 0, split.getLength() / 2, split.getHostnames());
    FileInputSplit split2 = new FileInputSplit(1, split.getPath(), split1.getLength(), split.getLength(), split.getHostnames());

    final Configuration parameters = new Configuration();
    format.setBufferSize(8);
    format.configure(parameters);

    String next;
    List<String> result = new ArrayList<String>();

    format.open(split1);
    while ((next = format.nextRecord(null)) != null) {
        result.add(next);
    }
    assertNull(format.nextRecord(null));
    assertTrue(format.reachedEnd());
    format.close();

    format.open(split2);
    while ((next = format.nextRecord(null)) != null) {
        result.add(next);
    }
    assertNull(format.nextRecord(null));
    assertTrue(format.reachedEnd());
    format.close();

    assertEquals(4, result.size());
    assertEquals(Arrays.asList(myString.split("\n")), result);
}
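The shared `format` field that these tests exercise is also not shown. Assuming it is a DelimitedInputFormat<String> that decodes each newline-delimited record as a UTF-8 string, a minimal fixture could look like the sketch below; the field initializer is illustrative and not taken from the original test class.
import java.nio.charset.StandardCharsets;
import org.apache.flink.api.common.io.DelimitedInputFormat;

// Illustrative fixture: a DelimitedInputFormat<String> that turns each delimited byte range
// into a String, which is all the surrounding tests need from `format`.
private final DelimitedInputFormat<String> format = new DelimitedInputFormat<String>() {
    @Override
    public String readRecord(String reuse, byte[] bytes, int offset, int numBytes) {
        return new String(bytes, offset, numBytes, StandardCharsets.UTF_8);
    }
};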
Example 4: getTimestampedSplit
import org.apache.flink.core.fs.FileInputSplit; // import the package/class the method depends on

private TimestampedFileInputSplit getTimestampedSplit(long modTime, FileInputSplit split) {
    Preconditions.checkNotNull(split);
    return new TimestampedFileInputSplit(
            modTime,
            split.getSplitNumber(),
            split.getPath(),
            split.getStart(),
            split.getLength(),
            split.getHostnames());
}
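As a hedged usage sketch for the helper above: the modification time is typically looked up through the split's own path via the Flink FileSystem API and then passed to getTimestampedSplit. The method name stampWithFileModTime is an assumption for illustration and is presumed to live in the same class as the helper.
import java.io.IOException;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit;

// Illustrative caller: resolves the file system from split.getPath(), reads the file's
// modification time, and stamps the split with it using the helper above.
private TimestampedFileInputSplit stampWithFileModTime(FileInputSplit split) throws IOException {
    FileSystem fs = split.getPath().getFileSystem();
    FileStatus status = fs.getFileStatus(split.getPath());
    return getTimestampedSplit(status.getModificationTime(), split);
}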
Example 5: testReadWithBufferSizeIsMultiple
import org.apache.flink.core.fs.FileInputSplit; // import the package/class the method depends on

/**
 * Tests that the correct number of records is read when the split boundary lies exactly on a record boundary.
 */
@Test
public void testReadWithBufferSizeIsMultiple() throws IOException {
    final String myString = "aaaaaaa\nbbbbbbb\nccccccc\nddddddd\n";
    final FileInputSplit split = createTempFile(myString);

    FileInputSplit split1 = new FileInputSplit(0, split.getPath(), 0, split.getLength() / 2, split.getHostnames());
    FileInputSplit split2 = new FileInputSplit(1, split.getPath(), split1.getLength(), split.getLength(), split.getHostnames());

    final Configuration parameters = new Configuration();
    format.setBufferSize(2 * ((int) split1.getLength()));
    format.configure(parameters);

    String next;
    int count = 0;

    // read split 1
    format.open(split1);
    while ((next = format.nextRecord(null)) != null) {
        assertEquals(7, next.length());
        count++;
    }
    assertNull(format.nextRecord(null));
    assertTrue(format.reachedEnd());
    format.close();

    // this split must have read one record too many, because the next split will skip the trailing
    // remainder, which happens to be one full record
    assertEquals(3, count);

    // read split 2
    format.open(split2);
    while ((next = format.nextRecord(null)) != null) {
        assertEquals(7, next.length());
        count++;
    }
    format.close();

    assertEquals(4, count);
}