This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.AppendTestUtil.initBuffer. If you are wondering how AppendTestUtil.initBuffer is used, what it does, or where to find example code for it, the curated method examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.AppendTestUtil.
The following shows 15 code examples of the AppendTestUtil.initBuffer method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
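As the examples below suggest, AppendTestUtil.initBuffer(int) returns a byte array of the requested size filled with deterministic test data, which the tests then write out and verify. Here is a minimal hedged sketch of that round-trip pattern, assuming only a running FileSystem instance; the class and path names are made up for illustration and are not part of the Hadoop sources.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import static org.junit.Assert.assertArrayEquals;

public class InitBufferRoundTrip {
  // Write an initBuffer()-generated buffer to a file and read it back unchanged.
  static void writeAndVerify(FileSystem fs, Path p, int size) throws IOException {
    byte[] expected = AppendTestUtil.initBuffer(size);  // deterministic test data
    try (FSDataOutputStream out = fs.create(p, true)) {
      out.write(expected, 0, expected.length);
    }
    byte[] actual = new byte[size];
    try (FSDataInputStream in = fs.open(p)) {
      in.readFully(actual);  // FSDataInputStream extends DataInputStream
    }
    assertArrayEquals(expected, actual);
  }
}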
Example 1: doTestReceiveAndMirror
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);
  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  pr.receiveNextPacket(in);
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));
  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);
  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
      .write(Mockito.<byte[]>any(), Mockito.anyInt(),
          Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);
  assertArrayEquals(packet, mirrored.toByteArray());
}
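The helpers prepareFakePacket() and remainingAsArray() belong to the enclosing test class and are not shown on this page. A plausible sketch of remainingAsArray(), which the assertions above assume copies a ByteBuffer's remaining bytes into a fresh array, might look like this (an assumption, not the verbatim Hadoop source):
// Assumed shape of the helper used above: copy buf's remaining bytes into a new array.
private static byte[] remainingAsArray(java.nio.ByteBuffer buf) {
  byte[] b = new byte[buf.remaining()];
  buf.get(b);
  return b;
}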
Example 2: testBasicTruncate
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;
  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
       fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);
      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }
      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
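writeContents() is another helper from the enclosing truncate test class that is not reproduced here. Based on how it is called, a minimal sketch could be: create the file with the test's replication and block size, then write the first fileLength bytes of the initBuffer() contents. The fields fs, BLOCK_SIZE and REPLICATION are assumed to come from the surrounding class.
// Assumed helper: write the first fileLength bytes of contents to path p.
static void writeContents(byte[] contents, int fileLength, Path p)
    throws IOException {
  FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
      BLOCK_SIZE);
  out.write(contents, 0, fileLength);
  out.close();
}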
Example 3: testTruncateWithDataNodesShutdownImmediately
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * shutdown the datanodes immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");
  writeContents(contents, startingFileSize, p);
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);
  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for (int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);
  fs.delete(parent, true);
}
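checkBlockRecovery() waits for the block recovery triggered by a non-boundary truncate to finish, i.e. for the file to leave the under-construction state. A hedged sketch of what such a helper might do, polling getLocatedBlocks(p) up to SUCCESS_ATTEMPTS times with a SLEEP pause between attempts (both constants assumed from the surrounding class):
// Assumed helper: poll until the truncated file's last block is complete.
static void checkBlockRecovery(Path p) throws IOException {
  boolean recovered = false;
  for (int i = 0; i < SUCCESS_ATTEMPTS; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p);
    if (!blocks.isUnderConstruction() && blocks.isLastBlockComplete()) {
      recovered = true;
      break;
    }
    try {
      Thread.sleep(SLEEP);
    } catch (InterruptedException ignored) {
    }
  }
  assertTrue("Block recovery should complete within the retry budget.", recovered);
}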
Example 4: testTruncateEditLogLoad
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);
  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));
  cluster.restartNameNode();
  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");
  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
Example 5: testTruncate4Symlink
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;
  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);
  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);
  final int newLength = fileLength / 3;
  boolean isReady = fs.truncate(link, newLength);
  assertTrue("Recovery is not expected.", isReady);
  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));
  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);
  fs.delete(parent, true);
}
Example 6: testHSyncBlockBoundary
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // first on block again
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
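checkSyncMetric() asserts how many times the DataNode has fsync'ed a block file. One way such a check could be written with Hadoop's org.apache.hadoop.test.MetricsAsserts utilities is sketched below; the metric name "FsyncCount" and the use of the first DataNode are assumptions, not necessarily the exact helper used in the test class.
// Assumed helper: assert the first DataNode's fsync counter equals the expected value.
private void checkSyncMetric(MiniDFSCluster cluster, long expectedFsyncs) {
  DataNode dn = cluster.getDataNodes().get(0);
  MetricsAsserts.assertCounter("FsyncCount", expectedFsyncs,
      MetricsAsserts.getMetrics(dn.getMetrics().name()));
}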
Example 7: testBasicTruncate
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
       fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);
      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }
      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
Example 8: testTruncateWithDataNodesShutdownImmediately
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * shutdown the datanodes immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");
  writeContents(contents, startingFileSize, p);
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);
  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for (int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);
  fs.delete(parent, true);
}
Example 9: testTruncate4Symlink
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);
  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);
  final int newLength = fileLength / 3;
  boolean isReady = fs.truncate(link, newLength);
  assertTrue("Recovery is not expected.", isReady);
  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));
  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);
  fs.delete(parent, true);
}
Example 10: doTestReceiveAndMirror
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);
  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  pr.receiveNextPacket(in);
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));
  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);
  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
      .write(Mockito.<byte[]>any(), Mockito.anyInt(),
          Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);
  assertArrayEquals(packet, mirrored.toByteArray());
}
Example 11: doTestReceiveAndMirror
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void doTestReceiveAndMirror(PacketReceiver pr, int dataLen,
    int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);
  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  pr.receiveNextPacket(in);
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));
  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);
  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
      .write(Mockito.<byte[]>any(), Mockito.anyInt(),
          Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);
  assertArrayEquals(packet, mirrored.toByteArray());
}
Example 12: testHSyncBlockBoundary
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test hsync on an exact block boundary
 */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(), EnumSet
      .of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // first on block again
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
Example 13: testTruncateWithDataNodesRestart
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * dn0 is shutdown before truncate and restart after truncate successful.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
  int dn = 0;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);
  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block id is the same, but the
   * GS should increase.
   * The truncated block will be replicated to dn0 after it restarts.
   */
  assertEquals(newBlock.getBlock().getBlockId(),
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);
  // Wait replicas come to 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn,
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));
  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);
  fs.delete(parent, true);
}
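getLocatedBlocks() is the last helper these truncate tests rely on; it fetches the file's block list directly from the client so the test can compare block IDs and generation stamps. A plausible sketch, assuming fs is a DistributedFileSystem:
// Assumed helper: ask the DFSClient for all located blocks of the file.
static LocatedBlocks getLocatedBlocks(Path src) throws IOException {
  return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
}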
Example 14: testCopyOnTruncateWithDataNodesRestart
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated at mid. (copy-on-truncate)
 * dn1 is shutdown before truncate and restart after truncate successful.
 */
@Test(timeout=60000)
public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");
  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
  fs.allowSnapshot(parent);
  fs.createSnapshot(parent, "ss0");
  int dn = 1;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);
  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For copy-on-truncate, new block is made with new block id and new GS.
   * The replicas of the new block is 2, then it will be replicated to dn1.
   */
  assertNotEquals(newBlock.getBlock().getBlockId(),
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);
  // Wait replicas come to 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // New block is replicated to dn1
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
      newBlock.getBlockSize());
  // Old replica exists too since there is snapshot
  assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(),
      oldBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn,
      oldBlock.getBlock()).getName().endsWith(
          oldBlock.getBlock().getGenerationStamp() + ".meta"));
  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);
  fs.deleteSnapshot(parent, "ss0");
  fs.delete(parent, true);
}
Example 15: testTruncateWithDataNodesRestartImmediately
import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * dn0, dn1 are restarted immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestartImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestartImmediately");
  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
  int dn0 = 0;
  int dn1 = 1;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);
  cluster.restartDataNode(dn0, true, true);
  cluster.restartDataNode(dn1, true, true);
  cluster.waitActive();
  checkBlockRecovery(p);
  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block id is the same, but the
   * GS should increase.
   */
  assertEquals(newBlock.getBlock().getBlockId(),
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);
  // Wait replicas come to 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one on dn0
  assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(),
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn0,
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));
  // Old replica is disregarded and replaced with the truncated one on dn1
  assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(),
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn1,
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));
  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);
  fs.delete(parent, true);
}