This article collects typical usage examples of the Java class org.apache.hadoop.util.Progressable. If you are wondering what the Progressable class is for, or how and where to use it, the curated class-level code examples below may help.
The Progressable class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are presented below, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Java code examples.
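Before the examples, a quick orientation: Progressable is a single-method callback interface. Long-running filesystem and shuffle operations invoke it periodically so the caller, typically a MapReduce task, can signal liveness to the framework. A minimal implementation looks like this (the log message is illustrative):

import org.apache.hadoop.util.Progressable;

public class LoggingProgress implements Progressable {
  @Override
  public void progress() {
    // Invoked periodically by Hadoop during long-running I/O; a real task
    // would typically forward this to its task context to reset timeouts.
    System.out.println("operation still in progress...");
  }
}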
Example 1: primitiveAppend
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Append to an existing file if {@link CreateFlag#APPEND} is present
 */
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
    int buffersize, Progressable progress) throws IOException {
  if (flag.contains(CreateFlag.APPEND)) {
    HdfsFileStatus stat = getFileInfo(src);
    if (stat == null) { // No file to append to
      // New file needs to be created if create option is present
      if (!flag.contains(CreateFlag.CREATE)) {
        throw new FileNotFoundException("failed to append to non-existent file "
            + src + " on client " + clientName);
      }
      return null;
    }
    return callAppend(src, buffersize, flag, progress, null);
  }
  return null;
}
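Example 1 is internal to DFSClient; at the public API level the same append path is reached through FileSystem#append(Path, int, Progressable). A minimal sketch, assuming fs.defaultFS points at an HDFS cluster and using an illustrative path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Progressable;

public class AppendWithProgress {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.log");   // illustrative path
    FSDataOutputStream out = fs.append(file, 4096, new Progressable() {
      @Override
      public void progress() {
        // called periodically while the append is in flight
      }
    });
    out.writeBytes("one more line\n");
    out.close();
  }
}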
Example 2: primitiveCreate
import org.apache.hadoop.util.Progressable; // import the required package/class
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
    short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
  boolean pathExists = exists(f);
  CreateFlag.validate(f, pathExists, flag);
  // The default impl assumes that neither permissions nor bytesPerChecksum
  // matter, hence calling the regular create is good enough.
  // FSs that implement permissions should override this.
  if (pathExists && flag.contains(CreateFlag.APPEND)) {
    return append(f, bufferSize, progress);
  }
  return this.create(f, absolutePermission,
      flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
      blockSize, progress);
}
Example 3: ChecksumFSOutputSummer
import org.apache.hadoop.util.Progressable; // import the required package/class
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
    Path file,
    boolean overwrite,
    int bufferSize,
    short replication,
    long blockSize,
    Progressable progress,
    FsPermission permission)
    throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
      bufferSize, replication, blockSize,
      progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
      permission, true, sumBufferSize,
      replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
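ChecksumFSOutputSummer is what LocalFileSystem, a ChecksumFileSystem, uses under the hood: each data file gets a hidden sibling ".<name>.crc" file whose header is the version and bytesPerSum values written above. A short sketch of triggering this through the public API (the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumWriteSketch {
  public static void main(String[] args) throws Exception {
    LocalFileSystem lfs = FileSystem.getLocal(new Configuration());
    FSDataOutputStream out = lfs.create(new Path("/tmp/data.txt"));
    out.writeBytes("hello checksums\n");
    out.close();
    // /tmp/.data.txt.crc now sits alongside /tmp/data.txt, holding one
    // CRC32 checksum per io.bytes.per.checksum (default 512) bytes of data.
  }
}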
Example 4: create
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes, which is
 * a hint to the namenode about where to place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored only
 * at creation time; HDFS may later move the blocks away from the favored
 * nodes during balancing or re-replication. A value of null means no favored
 * nodes for this create.
 */
public DFSOutputStream create(String src,
    FsPermission permission,
    EnumSet<CreateFlag> flag,
    boolean createParent,
    short replication,
    long blockSize,
    Progressable progress,
    int buffersize,
    ChecksumOpt checksumOpt,
    InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
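Application code normally does not hold a DFSClient directly; DistributedFileSystem exposes a create overload that forwards the same favoredNodes hint. A sketch under those assumptions, with illustrative datanode addresses:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FavoredNodesSketch {
  public static void main(String[] args) throws Exception {
    // assumes fs.defaultFS is an hdfs:// URI
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    InetSocketAddress[] favored = {                // illustrative addresses
        new InetSocketAddress("dn1.example.com", 50010),
        new InetSocketAddress("dn2.example.com", 50010)
    };
    // Best-effort hint only: the namenode may ignore it, and the balancer
    // may move blocks away from these nodes later.
    dfs.create(new Path("/tmp/favored.dat"), FsPermission.getFileDefault(),
        true, 4096, (short) 3, 128L * 1024 * 1024, null, favored).close();
  }
}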
Example 5: ValuesIterator
import org.apache.hadoop.util.Progressable; // import the required package/class
public ValuesIterator(RawKeyValueIterator in,
    RawComparator<KEY> comparator,
    Class<KEY> keyClass,
    Class<VALUE> valClass, Configuration conf,
    Progressable reporter)
    throws IOException {
  this.in = in;
  this.comparator = comparator;
  this.reporter = reporter;
  SerializationFactory serializationFactory = new SerializationFactory(conf);
  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(keyIn);
  this.valDeserializer = serializationFactory.getDeserializer(valClass);
  this.valDeserializer.open(this.valueIn);
  readNextKey();
  key = nextKey;
  nextKey = null; // force new instance creation
  hasNext = more;
}
Example 6: createNonRecursive
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  // Check whether the file should be appended or overwritten. Assume that
  // the file is overwritten if both the CREATE and OVERWRITE create flags
  // are set. Any other combination of create flags results in an open-new
  // or open-with-append.
  final EnumSet<CreateFlag> createflags =
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
  boolean overwrite = flags.containsAll(createflags);
  // Delegate the create non-recursive call.
  return this.createNonRecursive(f, permission, overwrite,
      bufferSize, replication, blockSize, progress);
}
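The behavioral difference from plain create() is that createNonRecursive fails with FileNotFoundException when the parent directory is missing, rather than creating the parents. A minimal sketch against the public FileSystem API (path and flags are illustrative):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class NonRecursiveCreateSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Throws FileNotFoundException if /existing-dir is missing, whereas
    // create() would silently create the parent directories.
    FSDataOutputStream out = fs.createNonRecursive(
        new Path("/existing-dir/file.dat"), FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        4096, (short) 1, 64L * 1024 * 1024, null);
    out.close();
  }
}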
Example 7: create
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
    short arg4, long arg5, Progressable arg6) throws IOException {
  LOG.debug("Create, " + retryCount);
  if (retryCount++ < successRetryCount) {
    throw new IOException("Something bad happen");
  }
  return null;
}
Example 8: createInternal
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
Example 9: primitiveCreate
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src,
    FsPermission absPermission,
    EnumSet<CreateFlag> flag,
    boolean createParent,
    short replication,
    long blockSize,
    Progressable progress,
    int buffersize,
    ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
Example 10: merge
import org.apache.hadoop.util.Progressable; // import the required package/class
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
    Class<K> keyClass, Class<V> valueClass,
    CompressionCodec codec,
    List<Segment<K, V>> segments,
    int mergeFactor, Path tmpDir,
    RawComparator<K> comparator, Progressable reporter,
    boolean sortSegments,
    Counters.Counter readsCounter,
    Counters.Counter writesCounter,
    Progress mergePhase,
    TaskType taskType)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
      sortSegments, codec,
      taskType).merge(keyClass, valueClass,
          mergeFactor, tmpDir,
          readsCounter, writesCounter,
          mergePhase);
}
Example 11: create
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  String file = mapToLocal(f).toUri().getPath();
  return new FSDataOutputStream(
      fsDelegate.open(file, overwrite
          ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
          : EnumSet.of(CreateFlag.CREATE), permission.toShort(), true,
          bufferSize, getDefaultStripeSize(), getDefaultStripeCount(), -1, null),
      statistics);
}
Example 12: create
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
  try {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, progress));
  } catch (FSError e) {
    throw propagateFSError(e);
  }
}
Example 13: create
import org.apache.hadoop.util.Progressable; // import the required package/class
/**
 * Create an FSDataOutputStream at the indicated Path with write-progress
 * reporting.
 * @param f the path of the file to open.
 * @param overwrite if a file with this name already exists, then if true,
 *   the file will be overwritten, and if false an error will be thrown.
 * @param bufferSize the size of the buffer to be used.
 * @param progress the callback to be notified of write progress.
 */
public FSDataOutputStream create(Path f,
    boolean overwrite,
    int bufferSize,
    Progressable progress) throws IOException {
  return create(f, overwrite, bufferSize,
      getDefaultReplication(f),
      getDefaultBlockSize(f), progress);
}
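This overload is the most common way application code hands a Progressable to a FileSystem. A minimal sketch with an illustrative path and buffer size:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Progressable;

public class CreateWithProgress {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/out.dat"),
        true /* overwrite */, 4096, new Progressable() {
          @Override
          public void progress() {
            // in a MapReduce task this would call context.progress() so the
            // task is not killed as unresponsive during a long write
          }
        });
    out.writeBytes("payload\n");
    out.close();
  }
}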
Example 14: S3OutputStream
import org.apache.hadoop.util.Progressable; // import the required package/class
public S3OutputStream(Configuration conf, FileSystemStore store,
    Path path, long blockSize, Progressable progress,
    int buffersize) throws IOException {
  this.conf = conf;
  this.store = store;
  this.path = path;
  this.blockSize = blockSize;
  this.backupFile = newBackupFile();
  this.backupStream = new FileOutputStream(backupFile);
  this.bufferSize = buffersize;
  this.outBuf = new byte[bufferSize];
}
Example 15: primitiveCreate
import org.apache.hadoop.util.Progressable; // import the required package/class
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
    short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
  statistics.incrementWriteOps(1);
  final DFSOutputStream dfsos = dfs.primitiveCreate(
      getPathName(fixRelativePart(f)),
      absolutePermission, flag, true, replication, blockSize,
      progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics);
}