This article collects typical usage examples of the Java method org.apache.cassandra.dht.IPartitioner.decorateKey: what the method does, how to call it, and how it appears in real code. If you are unsure how IPartitioner.decorateKey is used, the curated samples below should help; see also the enclosing class org.apache.cassandra.dht.IPartitioner for more detail.
Six code examples of IPartitioner.decorateKey are shown below, ordered by popularity by default.
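Before the examples, a minimal sketch of what decorateKey does: it wraps a raw partition-key ByteBuffer in a DecoratedKey, pairing the bytes with the token the partitioner computes from them, so keys can be ordered on the ring. The key string is an arbitrary placeholder, and exact class locations can vary slightly across Cassandra versions:

import java.nio.ByteBuffer;

import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.utils.ByteBufferUtil;

public class DecorateKeySketch
{
    public static void main(String[] args)
    {
        // decorateKey pairs the raw key bytes with the partitioner's token for them
        IPartitioner partitioner = new Murmur3Partitioner();
        ByteBuffer rawKey = ByteBufferUtil.bytes("some-partition-key"); // placeholder key
        DecoratedKey key = partitioner.decorateKey(rawKey);
        System.out.println(key.getToken()); // the key's position on the ring
    }
}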
Example 1: deserializeFirstLastKey
import org.apache.cassandra.dht.IPartitioner; // the package/class the method depends on

/**
 * Deserializes the first and last key stored in the summary.
 *
 * Only for use by offline tools like SSTableMetadataViewer; otherwise SSTable.first/last should be used.
 */
public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException
{
    in.skipBytes(4); // minIndexInterval
    int offsetCount = in.readInt();
    long offheapSize = in.readLong();
    if (haveSamplingLevel)
        in.skipBytes(8); // samplingLevel, fullSamplingSummarySize
    in.skip(offsetCount * 4);               // skip the offsets table
    in.skip(offheapSize - offsetCount * 4); // skip the serialized index entries
    DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    return Pair.create(first, last);
}
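For context, a hedged sketch of how an offline tool might invoke this method. The summaryFile path is a placeholder, the partitioner must be the one the sstable was written with, and Pair.left/Pair.right are Cassandra's utility pair fields:

// Hypothetical call site; summaryFile is a placeholder for the sstable's -Summary.db component
String summaryFile = "/path/to/ks1-cf1-ka-1-Summary.db"; // placeholder
try (DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(summaryFile))))
{
    Pair<DecoratedKey, DecoratedKey> bounds = deserializeFirstLastKey(in, new Murmur3Partitioner(), true);
    System.out.println("first: " + bounds.left + "  last: " + bounds.right);
}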
Example 2: testMurmur3FilterHash
import org.apache.cassandra.dht.IPartitioner; // the package/class the method depends on

@Test
public void testMurmur3FilterHash()
{
    IPartitioner partitioner = new Murmur3Partitioner();
    Iterator<ByteBuffer> gen = new KeyGenerator.RandomStringGenerator(new Random().nextInt(), FilterTestHelper.ELEMENTS);
    long[] expected = new long[2];
    long[] actual = new long[2];
    while (gen.hasNext())
    {
        // distinct sentinel values guarantee that filterHash really overwrites both slots
        expected[0] = 1;
        expected[1] = 2;
        actual[0] = 3;
        actual[1] = 4;
        ByteBuffer key = gen.next();
        FilterKey expectedKey = FilterTestHelper.wrap(key);
        FilterKey actualKey = partitioner.decorateKey(key);
        actualKey.filterHash(actual);
        expectedKey.filterHash(expected);
        Assert.assertArrayEquals(expected, actual);
    }
}
Example 3: importSorted
import org.apache.cassandra.dht.IPartitioner; // the package/class the method depends on

private int importSorted(String jsonFile, ColumnFamily columnFamily, String ssTablePath,
                         IPartitioner partitioner) throws IOException
{
    int importedKeys = 0; // already imported keys count
    long start = System.nanoTime();
    JsonParser parser = getParser(jsonFile);

    if (keyCountToImport == null)
    {
        keyCountToImport = 0;
        System.out.println("Counting keys to import, please wait... (NOTE: to skip this use -n <num_keys>)");

        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            parser.skipChildren();
            if (parser.getCurrentToken() == JsonToken.END_ARRAY)
                break;

            keyCountToImport++;
        }
    }

    System.out.printf("Importing %s keys...%n", keyCountToImport);

    parser = getParser(jsonFile); // renewing parser
    SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE);

    int lineNumber = 1;
    DecoratedKey prevStoredKey = null;

    parser.nextToken(); // START_ARRAY
    while (parser.nextToken() != null)
    {
        String key = parser.getCurrentName();
        Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
        DecoratedKey currentKey = partitioner.decorateKey(getKeyValidator(columnFamily).fromString((String) row.get("key")));

        if (row.containsKey("metadata"))
            parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);

        addColumnsToCF((List<?>) row.get("cells"), columnFamily);

        if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) != -1)
        {
            System.err
                  .printf("Line %d: Key %s is greater than previous, collection is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                          lineNumber, key);
            return -1;
        }

        // saving decorated key
        writer.append(currentKey, columnFamily);
        columnFamily.clear();

        prevStoredKey = currentKey;
        importedKeys++;
        lineNumber++;

        long current = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
        {
            System.out.printf("Currently imported %d keys.%n", importedKeys);
            start = current;
        }

        if (keyCountToImport == importedKeys)
            break;
    }

    writer.closeAndOpenReader();

    return importedKeys;
}
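Worth noting: prevStoredKey.compareTo(currentKey) compares DecoratedKeys in token order, so "sorted" here means sorted by the partitioner's tokens, not by the raw key bytes. A hedged illustration (the key strings are placeholders):

// With Murmur3Partitioner, token order usually differs from the byte order of the keys.
IPartitioner p = new Murmur3Partitioner();
DecoratedKey a = p.decorateKey(ByteBufferUtil.bytes("a"));
DecoratedKey b = p.decorateKey(ByteBufferUtil.bytes("b"));
System.out.println(a.compareTo(b)); // may be positive even though "a" < "b" as bytes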
Example 4: export
import org.apache.cassandra.dht.IPartitioner; // the package/class the method depends on

/**
 * Export specific rows from an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param desc     the descriptor of the sstable to read from
 * @param outs     PrintStream to write the output to
 * @param toExport the keys corresponding to the rows to export
 * @param excludes keys to exclude from export
 * @param metadata Metadata to print keys in a proper format
 * @throws IOException on failure to read/write input/output
 */
public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes, CFMetaData metadata) throws IOException
{
    SSTableReader sstable = SSTableReader.open(desc);
    RandomAccessReader dfile = sstable.openDataReader();
    try
    {
        IPartitioner partitioner = sstable.partitioner;

        if (excludes != null)
            toExport.removeAll(Arrays.asList(excludes));

        outs.println("[");

        int i = 0;

        // last key to compare order
        DecoratedKey lastKey = null;

        for (String key : toExport)
        {
            DecoratedKey decoratedKey = partitioner.decorateKey(metadata.getKeyValidator().fromString(key));

            if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
                throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

            lastKey = decoratedKey;

            RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
            if (entry == null)
                continue;

            dfile.seek(entry.position);
            ByteBufferUtil.readWithShortLength(dfile); // row key
            DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));

            Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version);
            checkStream(outs);

            if (i != 0)
                outs.println(",");
            i++;
            serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
        }

        outs.println("\n]");
        outs.flush();
    }
    finally
    {
        dfile.close();
    }
}
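A hedged sketch of a call site for this export. The data file path, keyspace/table names, and key strings are placeholders; the keys must be in the format accepted by the table's key validator and supplied in token order, and the collection must be mutable because removeAll is applied to it:

// Hypothetical invocation; assumes the tool runs with the schema loaded
Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks1/cf1/ks1-cf1-ka-1-Data.db"); // placeholder path
CFMetaData metadata = Schema.instance.getCFMetaData("ks1", "cf1"); // placeholder keyspace/table
export(desc, System.out, new ArrayList<>(Arrays.asList("key1", "key2")), new String[0], metadata);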
Example 5: importSorted
import org.apache.cassandra.dht.IPartitioner; // the package/class the method depends on

private int importSorted(String jsonFile, ColumnFamily columnFamily, String ssTablePath,
                         IPartitioner<?> partitioner) throws IOException
{
    int importedKeys = 0; // already imported keys count
    long start = System.nanoTime();
    JsonParser parser = getParser(jsonFile);

    if (keyCountToImport == null)
    {
        keyCountToImport = 0;
        System.out.println("Counting keys to import, please wait... (NOTE: to skip this use -n <num_keys>)");

        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            parser.skipChildren();
            if (parser.getCurrentToken() == JsonToken.END_ARRAY)
                break;

            keyCountToImport++;
        }
    }

    System.out.printf("Importing %s keys...%n", keyCountToImport);

    parser = getParser(jsonFile); // renewing parser
    SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport);

    int lineNumber = 1;
    DecoratedKey prevStoredKey = null;

    parser.nextToken(); // START_ARRAY
    while (parser.nextToken() != null)
    {
        String key = parser.getCurrentName();
        Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
        DecoratedKey currentKey = partitioner.decorateKey(hexToBytes((String) row.get("key")));

        if (row.containsKey("metadata"))
            parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);

        if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
            addToSuperCF((Map<?, ?>) row.get("columns"), columnFamily);
        else
            addToStandardCF((List<?>) row.get("columns"), columnFamily);

        if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) != -1)
        {
            System.err
                  .printf("Line %d: Key %s is greater than previous, collection is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                          lineNumber, key);
            return -1;
        }

        // saving decorated key
        writer.append(currentKey, columnFamily);
        columnFamily.clear();

        prevStoredKey = currentKey;
        importedKeys++;
        lineNumber++;

        long current = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
        {
            System.out.printf("Currently imported %d keys.%n", importedKeys);
            start = current;
        }

        if (keyCountToImport == importedKeys)
            break;
    }

    writer.closeAndOpenReader();

    return importedKeys;
}
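Relative to Example 3, the decorateKey-relevant difference is only how the raw key bytes are obtained: this older version expects hex-encoded keys, while Example 3 goes through the table's key validator. A hedged side-by-side, assuming the same class context and static imports as the examples:

// Both paths produce a DecoratedKey; only the decoding of the JSON "key" field differs.
String hexKey = "6b6579"; // placeholder: hex encoding of the bytes of "key"
DecoratedKey viaHex = partitioner.decorateKey(hexToBytes(hexKey));                                     // Example 5
DecoratedKey viaValidator = partitioner.decorateKey(getKeyValidator(columnFamily).fromString("key")); // Example 3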
Example 6: export
import org.apache.cassandra.dht.IPartitioner; // the package/class the method depends on

/**
 * Export specific rows from an SSTable and write the resulting JSON to a PrintStream.
 *
 * @param desc     the descriptor of the sstable to read from
 * @param outs     PrintStream to write the output to
 * @param toExport the keys corresponding to the rows to export
 * @param excludes keys to exclude from export
 * @throws IOException on failure to read/write input/output
 */
public static void export(Descriptor desc, PrintStream outs, Collection<String> toExport, String[] excludes) throws IOException
{
    SSTableReader sstable = SSTableReader.open(desc);
    RandomAccessReader dfile = sstable.openDataReader();

    IPartitioner<?> partitioner = sstable.partitioner;

    if (excludes != null)
        toExport.removeAll(Arrays.asList(excludes));

    outs.println("[");

    int i = 0;

    // last key to compare order
    DecoratedKey lastKey = null;

    for (String key : toExport)
    {
        DecoratedKey decoratedKey = partitioner.decorateKey(hexToBytes(key));

        if (lastKey != null && lastKey.compareTo(decoratedKey) > 0)
            throw new IOException("Key out of order! " + lastKey + " > " + decoratedKey);

        lastKey = decoratedKey;

        RowIndexEntry entry = sstable.getPosition(decoratedKey, SSTableReader.Operator.EQ);
        if (entry == null)
            continue;

        dfile.seek(entry.position);
        ByteBufferUtil.readWithShortLength(dfile); // row key
        if (sstable.descriptor.version.hasRowSizeAndColumnCount)
            dfile.readLong(); // row size
        DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile));
        int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? dfile.readInt() : Integer.MAX_VALUE;

        Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, columnCount, sstable.descriptor.version);

        checkStream(outs);

        if (i != 0)
            outs.println(",");
        i++;
        serializeRow(deletionInfo, atomIterator, sstable.metadata, decoratedKey, outs);
    }

    outs.println("\n]");
    outs.flush();
}
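As in Example 4, the keys must arrive in token order or the export throws; here they are hex-encoded row keys decoded with hexToBytes rather than a key validator. A hedged invocation, with the path and key strings as placeholders:

// Hypothetical call site for the older export (hex-encoded keys, no CFMetaData parameter)
Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks1/cf1/ks1-cf1-ic-1-Data.db"); // placeholder path
export(desc, System.out, new ArrayList<>(Arrays.asList("6b657931", "6b657932")), new String[0]);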