This article collects typical usage examples of the Java class org.apache.hadoop.util.Progressable. If you are wondering what the Progressable class does, how to use it, or where to find examples of it in action, the curated class examples below may help.
The Progressable class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, sorted by popularity by default.
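Before diving into the examples, here is a minimal sketch of what implementing the interface looks like: Progressable declares a single progress() callback that long-running HDFS and MapReduce operations invoke to signal liveness. The counting logic here is purely illustrative.
import org.apache.hadoop.util.Progressable;

public class CountingProgress implements Progressable {
  private long calls;

  @Override
  public void progress() {
    // Invoked periodically by long-running operations to report
    // that work is still being made.
    calls++;
  }

  public long getCalls() {
    return calls;
  }
}
Since the interface has only one abstract method, a lambda such as () -> {} also works wherever a Progressable is expected.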
Example 1: primitiveAppend
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
* Append to an existing file if {@link CreateFlag#APPEND} is present
*/
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
    int buffersize, Progressable progress) throws IOException {
  if (flag.contains(CreateFlag.APPEND)) {
    HdfsFileStatus stat = getFileInfo(src);
    if (stat == null) { // No file to append to
      // New file needs to be created if the create option is present
      if (!flag.contains(CreateFlag.CREATE)) {
        throw new FileNotFoundException("failed to append to non-existent file "
            + src + " on client " + clientName);
      }
      return null;
    }
    return callAppend(src, buffersize, flag, progress, null);
  }
  return null;
}
Example 2: primitiveCreate
import org.apache.hadoop.util.Progressable; // import the required package/class
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
    short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
  boolean pathExists = exists(f);
  CreateFlag.validate(f, pathExists, flag);
  // The default impl assumes that permissions do not matter, nor does
  // the bytesPerChecksum, hence calling the regular create is good enough.
  // FSs that implement permissions should override this.
  if (pathExists && flag.contains(CreateFlag.APPEND)) {
    return append(f, bufferSize, progress);
  }
  return this.create(f, absolutePermission,
      flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
      blockSize, progress);
}
Example 3: ChecksumFSOutputSummer
import org.apache.hadoop.util.Progressable; // import the required package/class
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
    Path file,
    boolean overwrite,
    int bufferSize,
    short replication,
    long blockSize,
    Progressable progress,
    FsPermission permission)
    throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
      bufferSize, replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
      permission, true, sumBufferSize, replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
Example 4: create
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes, a hint
 * to the namenode about where to place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored only
 * at creation time; HDFS may later move the blocks off the favored nodes
 * during balancing or re-replication. A value of null means no favored nodes
 * for this create.
 */
public DFSOutputStream create(String src,
    FsPermission permission,
    EnumSet<CreateFlag> flag,
    boolean createParent,
    short replication,
    long blockSize,
    Progressable progress,
    int buffersize,
    ChecksumOpt checksumOpt,
    InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
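A minimal caller-side sketch for the favored-nodes overload above. The dfsClient handle, the path, and the datanode address are assumptions for illustration, not part of the original snippet; the statements would live inside a method that handles IOException.
// Hint the namenode to place blocks on this datanode if possible.
InetSocketAddress[] favored = new InetSocketAddress[] {
    new InetSocketAddress("datanode1.example.com", 50010) // hypothetical host
};
DFSOutputStream out = dfsClient.create(
    "/user/demo/file.bin",            // hypothetical path
    FsPermission.getFileDefault(),
    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
    true,                             // createParent
    (short) 3,                        // replication
    128L * 1024 * 1024,               // blockSize: 128 MB
    () -> { },                        // no-op Progressable callback
    4096,                             // buffersize
    null,                             // default ChecksumOpt
    favored);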
Example 5: ValuesIterator
import org.apache.hadoop.util.Progressable; // import the required package/class
public ValuesIterator(RawKeyValueIterator in,
    RawComparator<KEY> comparator,
    Class<KEY> keyClass,
    Class<VALUE> valClass, Configuration conf,
    Progressable reporter)
    throws IOException {
  this.in = in;
  this.comparator = comparator;
  this.reporter = reporter;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(keyIn);
  this.valDeserializer = serializationFactory.getDeserializer(valClass);
  this.valDeserializer.open(this.valueIn);
  readNextKey();
  key = nextKey;
  nextKey = null; // force new instance creation
  hasNext = more;
}
Example 6: createNonRecursive
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  // Check if the file should be appended or overwritten. Assume that the file
  // is overwritten if both the CREATE and OVERWRITE create flags are set. Note
  // that any other combination of create flags results in either opening a new
  // file or opening with append.
  final EnumSet<CreateFlag> createflags =
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
  boolean overwrite = flags.containsAll(createflags);
  // Delegate the create non-recursive call.
  return this.createNonRecursive(f, permission, overwrite,
      bufferSize, replication, blockSize, progress);
}
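A quick, self-contained illustration of the containsAll check above: overwrite is true only when both CREATE and OVERWRITE are present in the flag set.
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;

public class CreateFlagDemo {
  public static void main(String[] args) {
    EnumSet<CreateFlag> createOverwrite =
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);

    // Both flags present: treated as overwrite.
    System.out.println(
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
            .containsAll(createOverwrite)); // true

    // CREATE alone: not an overwrite, so the file is opened as new.
    System.out.println(
        EnumSet.of(CreateFlag.CREATE).containsAll(createOverwrite)); // false
  }
}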
Example 7: create
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
    short arg4, long arg5, Progressable arg6) throws IOException {
  LOG.debug("Create, " + retryCount);
  if (retryCount++ < successRetryCount) {
    throw new IOException("Something bad happened");
  }
  return null;
}
Example 8: createInternal
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert (res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt, createParent);
}
Example 9: primitiveCreate
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e., it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src,
    FsPermission absPermission,
    EnumSet<CreateFlag> flag,
    boolean createParent,
    short replication,
    long blockSize,
    Progressable progress,
    int buffersize,
    ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
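The "absolute permission" in the javadoc means the caller applies the umask before calling. A small sketch of that masking step, assuming a Hadoop Configuration is at hand:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The default file permission (0666) masked with the configured umask
    // yields the absolute permission expected by primitiveCreate.
    FsPermission requested = FsPermission.getFileDefault();
    FsPermission absolute = requested.applyUMask(FsPermission.getUMask(conf));
    System.out.println("absolute permission: " + absolute);
  }
}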
Example 10: merge
import org.apache.hadoop.util.Progressable; // import the required package/class
public static <K extends Object, V extends Object>
    RawKeyValueIterator merge(Configuration conf, FileSystem fs,
        Class<K> keyClass, Class<V> valueClass,
        CompressionCodec codec,
        List<Segment<K, V>> segments,
        int mergeFactor, Path tmpDir,
        RawComparator<K> comparator, Progressable reporter,
        boolean sortSegments,
        Counters.Counter readsCounter,
        Counters.Counter writesCounter,
        Progress mergePhase,
        TaskType taskType)
        throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
      sortSegments, codec, taskType).merge(keyClass, valueClass,
          mergeFactor, tmpDir, readsCounter, writesCounter, mergePhase);
}
Example 11: create
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  String file = mapToLocal(f).toUri().getPath();
  return new FSDataOutputStream(
      fsDelegate.open(file, overwrite
          ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
          : EnumSet.of(CreateFlag.CREATE), permission.toShort(), true,
          bufferSize, getDefaultStripeSize(), getDefaultStripeCount(), -1, null),
      statistics);
}
Example 12: create
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
  try {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, progress));
  } catch (FSError e) {
    throw propagateFSError(e);
  }
}
Example 13: create
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the path of the file to open
 * @param overwrite if a file with this name already exists, then if true,
 *   the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param progress callback invoked periodically to report write progress
 */
public FSDataOutputStream create(Path f,
    boolean overwrite,
    int bufferSize,
    Progressable progress) throws IOException {
  return create(f, overwrite, bufferSize,
      getDefaultReplication(f),
      getDefaultBlockSize(f), progress);
}
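A minimal caller-side sketch for this overload, assuming a configured FileSystem is reachable; the path and the printed message are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ProgressCreateDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Progressable has a single progress() method, so a lambda works here.
    try (FSDataOutputStream out = fs.create(
        new Path("/tmp/progress-demo.txt"), // hypothetical path
        true,                               // overwrite
        4096,                               // bufferSize
        () -> System.out.println("still writing..."))) {
      out.writeUTF("hello");
    }
  }
}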
Example 14: S3OutputStream
import org.apache.hadoop.util.Progressable; // import the required package/class
public S3OutputStream(Configuration conf, FileSystemStore store,
    Path path, long blockSize, Progressable progress,
    int buffersize) throws IOException {
  this.conf = conf;
  this.store = store;
  this.path = path;
  this.blockSize = blockSize;
  this.backupFile = newBackupFile();
  this.backupStream = new FileOutputStream(backupFile);
  this.bufferSize = buffersize;
  this.outBuf = new byte[bufferSize];
}
Example 15: primitiveCreate
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
    short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
  statistics.incrementWriteOps(1);
  final DFSOutputStream dfsos = dfs.primitiveCreate(
      getPathName(fixRelativePart(f)),
      absolutePermission, flag, true, replication, blockSize,
      progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics);
}