This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSUtil.string2Bytes. If you are wondering what DFSUtil.string2Bytes does, how to call it, or what real-world usage looks like, the curated method examples below may help. You can also read further about its enclosing class, org.apache.hadoop.hdfs.DFSUtil.
The following shows 14 code examples of the DFSUtil.string2Bytes method, sorted by popularity by default.
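Before diving into the examples, a brief orientation: DFSUtil.string2Bytes converts a String path component (a file, directory, or snapshot name) into the UTF-8 byte[] form that HDFS uses internally for INode names, and DFSUtil.bytes2String performs the reverse conversion. The following is a minimal sketch of calling the method directly; the class name String2BytesDemo is invented for illustration and assumes a Hadoop HDFS dependency on the classpath.

import org.apache.hadoop.hdfs.DFSUtil;

public class String2BytesDemo {
  public static void main(String[] args) {
    // Convert a path component into the byte[] representation used for INode local names.
    byte[] name = DFSUtil.string2Bytes("snapshot-dir");
    // bytes2String decodes the UTF-8 bytes back into a String.
    System.out.println(DFSUtil.bytes2String(name)); // prints "snapshot-dir"
    System.out.println(name.length);                // number of UTF-8 bytes (12 here)
  }
}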
Example 1: renameReservedComponentOnUpgrade
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support snapshots, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
    if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
          RESERVED_ERROR_MSG);
      component =
          DFSUtil.string2Bytes(renameReservedMap
              .get(HdfsConstants.DOT_SNAPSHOT_DIR));
    }
  }
  return component;
}
Example 2: renameReservedRootComponentOnUpgrade
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/**
 * Same as {@link #renameReservedPathsOnUpgrade(String)}, but for a single
 * byte array path component.
 */
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
    final int layoutVersion) {
  // If the LV doesn't support inode IDs, we're doing an upgrade
  if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
    if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
      Preconditions.checkArgument(
          renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
          RESERVED_ERROR_MSG);
      final String renameString = renameReservedMap
          .get(FSDirectory.DOT_RESERVED_STRING);
      component =
          DFSUtil.string2Bytes(renameString);
      LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
          + " to " + renameString);
    }
  }
  return component;
}
Example 3: testHdfsFileStatus
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json = " + json.replace(",", ",\n "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2 = " + s2);
  System.out.println("fs2 = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
Example 4: testGetFullPathName
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
@Test
public void testGetFullPathName() {
  replication = 3;
  preferredBlockSize = 128*1024*1024;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);
  inf.setLocalName(DFSUtil.string2Bytes("f"));
  INodeDirectory root = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      INodeDirectory.ROOT_NAME, perm, 0L);
  INodeDirectory dir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      DFSUtil.string2Bytes("d"), perm, 0L);
  assertEquals("f", inf.getFullPathName());
  dir.addChild(inf);
  assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
  root.addChild(dir);
  assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
  assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
  assertEquals(Path.SEPARATOR, root.getFullPathName());
}
Example 5: createAbortedLogWithMkdirs
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/**
 * Create an aborted in-progress log in the given directory, containing
 * only a specified number of "mkdirs" operations.
 */
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
    long firstTxId, long newInodeId) throws IOException {
  FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
  editLog.setNextTxId(firstTxId);
  editLog.openForWrite();
  PermissionStatus perms = PermissionStatus.createImmutable("fakeuser", "fakegroup",
      FsPermission.createImmutable((short)0755));
  for (int i = 1; i <= numDirs; i++) {
    String dirName = "dir" + i;
    INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
        DFSUtil.string2Bytes(dirName), perms, 0L);
    editLog.logMkDir("/" + dirName, dir);
  }
  editLog.logSync();
  editLog.abortCurrentLogSegment();
}
Example 6: toFileStatus
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }
  final Map<?, ?> m = includesType ?
      (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  final byte[] symlink = type != PathType.SYMLINK? null
      : DFSUtil.string2Bytes((String)m.get("symlink"));
  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
      (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
Example 7: verifySnapshotName
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/** Verify if the snapshot name is legal. */
static void verifySnapshotName(FSDirectory fsd, String snapshotName,
    String path)
    throws FSLimitException.PathComponentTooLongException {
  if (snapshotName.contains(Path.SEPARATOR)) {
    throw new HadoopIllegalArgumentException(
        "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
  }
  final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
  fsd.verifyINodeName(bytes);
  fsd.verifyMaxComponentLength(bytes, path);
}
Example 8: getSnapshottableDirListing
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/**
 * List all the snapshottable directories that are owned by the current user.
 * @param userName Current user name.
 * @return Snapshottable directories that are owned by the current user,
 *         represented as an array of {@link SnapshottableDirectoryStatus}. If
 *         {@code userName} is null, return all the snapshottable dirs.
 */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
    String userName) {
  if (snapshottables.isEmpty()) {
    return null;
  }
  List<SnapshottableDirectoryStatus> statusList =
      new ArrayList<SnapshottableDirectoryStatus>();
  for (INodeDirectory dir : snapshottables.values()) {
    if (userName == null || userName.equals(dir.getUserName())) {
      SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
          dir.getModificationTime(), dir.getAccessTime(),
          dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
          dir.getLocalNameBytes(), dir.getId(),
          dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
          dir.getDirectorySnapshottableFeature().getNumSnapshots(),
          dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
          dir.getParent() == null ? DFSUtil.EMPTY_BYTES :
              DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
      statusList.add(status);
    }
  }
  Collections.sort(statusList, SnapshottableDirectoryStatus.COMPARATOR);
  return statusList.toArray(
      new SnapshottableDirectoryStatus[statusList.size()]);
}
Example 9: renameSnapshot
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/**
 * Rename a snapshot
 * @param path
 *          The directory path where the snapshot was taken. Used for
 *          generating exception message.
 * @param oldName
 *          Old name of the snapshot
 * @param newName
 *          New name the snapshot will be renamed to
 * @throws SnapshotException
 *           Throw SnapshotException when either the snapshot with the old
 *           name does not exist or a snapshot with the new name already
 *           exists
 */
public void renameSnapshot(String path, String oldName, String newName)
    throws SnapshotException {
  if (newName.equals(oldName)) {
    return;
  }
  final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
  if (indexOfOld < 0) {
    throw new SnapshotException("The snapshot " + oldName
        + " does not exist for directory " + path);
  } else {
    final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
    int indexOfNew = searchSnapshot(newNameBytes);
    if (indexOfNew >= 0) {
      throw new SnapshotException("The snapshot " + newName
          + " already exists for directory " + path);
    }
    // remove the one with old name from snapshotsByNames
    Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
    final INodeDirectory ssRoot = snapshot.getRoot();
    ssRoot.setLocalName(newNameBytes);
    indexOfNew = -indexOfNew - 1;
    if (indexOfNew <= indexOfOld) {
      snapshotsByNames.add(indexOfNew, snapshot);
    } else { // indexOfNew > indexOfOld
      snapshotsByNames.add(indexOfNew - 1, snapshot);
    }
  }
}
Example 10: getPathComponents
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/** Convert strings to byte arrays for path components. */
static byte[][] getPathComponents(String[] strings) {
  if (strings.length == 0) {
    return new byte[][]{null};
  }
  byte[][] bytes = new byte[strings.length][];
  for (int i = 0; i < strings.length; i++)
    bytes[i] = DFSUtil.string2Bytes(strings[i]);
  return bytes;
}
Example 11: testIdCmp
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/**
 * Test {@link Snapshot#ID_COMPARATOR}.
 */
@Test (timeout=300000)
public void testIdCmp() {
  final PermissionStatus perm = PermissionStatus.createImmutable(
      "user", "group", FsPermission.createImmutable((short)0));
  final INodeDirectory snapshottable = new INodeDirectory(0,
      DFSUtil.string2Bytes("foo"), perm, 0L);
  snapshottable.addSnapshottableFeature();
  final Snapshot[] snapshots = {
    new Snapshot(1, "s1", snapshottable),
    new Snapshot(1, "s1", snapshottable),
    new Snapshot(2, "s2", snapshottable),
    new Snapshot(2, "s2", snapshottable),
  };
  Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
  for(Snapshot s : snapshots) {
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
    for(Snapshot t : snapshots) {
      final int expected = s.getRoot().getLocalName().compareTo(
          t.getRoot().getLocalName());
      final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
      Assert.assertEquals(expected > 0, computed > 0);
      Assert.assertEquals(expected == 0, computed == 0);
      Assert.assertEquals(expected < 0, computed < 0);
    }
  }
}
Example 12: INodeSymlink
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
INodeSymlink(long id, byte[] name, PermissionStatus permissions,
    long mtime, long atime, String symlink) {
  super(id, name, permissions, mtime, atime);
  this.symlink = DFSUtil.string2Bytes(symlink);
}
Example 13: testFsckFileNotFound
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
/** Test fsck with FileNotFound */
@Test
public void testFsckFileNotFound() throws Exception {
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  Configuration conf = new Configuration();
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String,String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);
  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockLocations(any(FSPermissionChecker.class), anyString(),
      anyLong(), anyLong(),
      anyBoolean(), anyBoolean()))
      .thenThrow(new FileNotFoundException());
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);
  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      NUM_REPLICAS, remoteAddress);
  String pathString = "/tmp/testFile";
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = new byte[128];
  path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;
  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy);
  Result res = new Result(conf);
  try {
    fsck.check(pathString, file, res);
  } catch (Exception e) {
    fail("Unexpected exception " + e.getMessage());
  }
  assertTrue(res.toString().contains("HEALTHY"));
}
Example 14: newINode
import org.apache.hadoop.hdfs.DFSUtil; // import the package/class this method depends on
static INode newINode(int n, int width) {
  byte[] name = DFSUtil.string2Bytes(String.format("n%0" + width + "d", n));
  return new INodeDirectory(n, name, PERM, 0L);
}