本文整理汇总了Java中org.apache.cassandra.io.sstable.SSTableWriter.append方法的典型用法代码示例。如果您正苦于以下问题:Java SSTableWriter.append方法的具体用法?Java SSTableWriter.append怎么用?Java SSTableWriter.append使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.io.sstable.SSTableWriter
的用法示例。
在下文中一共展示了SSTableWriter.append方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testAsciiKeyValidator
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
@Test
public void testAsciiKeyValidator() throws IOException, ParseException
{
    // Round-trips one row keyed by the ASCII string "key" through an sstable,
    // then checks that SSTableExport renders the key as text (not hex) because
    // the export metadata has an ASCII-friendly key validator.
    File tempSS = tempSSTableFile("Keyspace1", "AsciiKeys");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create("Keyspace1", "AsciiKeys");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add a row
    cfamily.addColumn(column("column", "value", 1L));
    writer.append(Util.dk("key", AsciiType.instance), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify; close the streams so the temp file is flushed
    // and no descriptors leak if an assertion fires.
    File tempJson = File.createTempFile("CFWithAsciiKeys", ".json");
    try (PrintStream out = new PrintStream(tempJson.getPath()))
    {
        SSTableExport.export(reader,
                             out,
                             new String[0],
                             CFMetaData.sparseCFMetaData("Keyspace1", "AsciiKeys", BytesType.instance));
    }
    JSONArray json;
    try (FileReader in = new FileReader(tempJson))
    {
        json = (JSONArray) JSONValue.parseWithException(in);
    }
    assertEquals(1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    // check row key
    assertEquals("key", row.get("key"));
}
示例2: getDummyWriter
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Builds an SSTableWriter for Keyspace1/Standard1 with two single-column
 * rows ("rowA"/colA and "rowB"/colB) already appended. The writer is
 * returned still open so the caller decides whether to close or abort it.
 */
public SSTableWriter getDummyWriter() throws IOException
{
    File dataFile = tempSSTableFile("Keyspace1", "Standard1");
    SSTableWriter dummyWriter = new SSTableWriter(dataFile.getPath(), 2);
    ColumnFamily cf = TreeMapBackedSortedColumns.factory.create("Keyspace1", "Standard1");

    // First row: rowA -> colA=valA
    cf.addColumn(ByteBufferUtil.bytes("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
    dummyWriter.append(Util.dk("rowA"), cf);
    cf.clear();

    // Second row: rowB -> colB=valB (reusing the same ColumnFamily instance)
    cf.addColumn(ByteBufferUtil.bytes("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
    dummyWriter.append(Util.dk("rowB"), cf);
    cf.clear();

    return dummyWriter;
}
示例3: testAsciiKeyValidator
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
@Test
public void testAsciiKeyValidator() throws IOException, ParseException
{
    // Round-trips one row keyed by the ASCII string "key" through an sstable,
    // then checks that SSTableExport renders the key as text (not hex) because
    // the export metadata has an ASCII-friendly key validator.
    File tempSS = tempSSTableFile(KEYSPACE1, "AsciiKeys");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create(KEYSPACE1, "AsciiKeys");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add a row
    cfamily.addColumn(column("column", "value", 1L));
    writer.append(Util.dk("key", AsciiType.instance), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify; close the streams so the temp file is flushed
    // and no descriptors leak if an assertion fires.
    File tempJson = File.createTempFile("CFWithAsciiKeys", ".json");
    try (PrintStream out = new PrintStream(tempJson.getPath()))
    {
        SSTableExport.export(reader,
                             out,
                             new String[0],
                             CFMetaData.sparseCFMetaData(KEYSPACE1, "AsciiKeys", BytesType.instance));
    }
    JSONArray json;
    try (FileReader in = new FileReader(tempJson))
    {
        json = (JSONArray) JSONValue.parseWithException(in);
    }
    assertEquals(1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    // check row key
    assertEquals("key", row.get("key"));
}
示例4: getDummyWriter
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Builds an SSTableWriter for Keyspace1/Standard1 with two single-column
 * rows ("rowA"/colA and "rowB"/colB) already appended. The writer is
 * returned still open so the caller decides whether to close or abort it.
 */
public SSTableWriter getDummyWriter() throws IOException
{
    File dataFile = tempSSTableFile("Keyspace1", "Standard1");
    SSTableWriter dummyWriter = new SSTableWriter(dataFile.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    ColumnFamily cf = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");

    // First row: rowA -> colA=valA
    cf.addColumn(Util.cellname("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
    dummyWriter.append(Util.dk("rowA"), cf);
    cf.clear();

    // Second row: rowB -> colB=valB (reusing the same ColumnFamily instance)
    cf.addColumn(Util.cellname("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
    dummyWriter.append(Util.dk("rowB"), cf);
    cf.clear();

    return dummyWriter;
}
示例5: writeSortedContents
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Flushes the memtable's sorted contents to a new sstable and returns a
 * reader over it. Uses SLF4J parameterized logging (consistent with the
 * other flush paths in this file) instead of string concatenation /
 * String.format, so no formatting work happens when the level is disabled.
 */
private SSTableReader writeSortedContents(ReplayPosition context) throws IOException
{
    logger.info("Writing {}", this);
    // Estimate the on-disk size: each key is written once in the index and
    // once in the data file, plus the serialized column data, with ~20%
    // overhead for the bloom filter and row index.
    long keySize = 0;
    for (DecoratedKey key : columnFamilies.keySet())
        keySize += key.key.remaining();
    long estimatedSize = (long) ((keySize // index entries
                                  + keySize // keys in data file
                                  + currentThroughput.get()) // data
                                 * 1.2); // bloom filter and row index overhead
    SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size(), estimatedSize, context);
    // (we can't clear out the map as-we-go to free up memory,
    // since the memtable is being used for queries in the "pending flush" category)
    for (Map.Entry<DecoratedKey, ColumnFamily> entry : columnFamilies.entrySet())
        writer.append(entry.getKey(), entry.getValue());
    SSTableReader ssTable = writer.closeAndOpenReader();
    logger.info("Completed flushing {} ({} bytes)",
                ssTable.getFilename(), new File(ssTable.getFilename()).length());
    return ssTable;
}
示例6: testColumnNameEqualToDefaultKeyAlias
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Tests CASSANDRA-6892 (key aliases being used improperly for validation):
 * a column literally named like the default key alias must not be validated
 * with the key validator. Writes one row whose key is a time-UUID and whose
 * single cell is named {@code CFMetaData.DEFAULT_KEY_ALIAS} with a
 * non-UUID value, then verifies the export succeeds and round-trips.
 */
@Test
public void testColumnNameEqualToDefaultKeyAlias() throws IOException, ParseException
{
    File tempSS = tempSSTableFile("Keyspace1", "UUIDKeys");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create("Keyspace1", "UUIDKeys");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add a row
    cfamily.addColumn(column(CFMetaData.DEFAULT_KEY_ALIAS, "not a uuid", 1L));
    writer.append(Util.dk(ByteBufferUtil.bytes(UUIDGen.getTimeUUID())), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify; close the streams so the temp file is flushed
    // and no descriptors leak if an assertion fires.
    File tempJson = File.createTempFile("CFWithColumnNameEqualToDefaultKeyAlias", ".json");
    try (PrintStream out = new PrintStream(tempJson.getPath()))
    {
        SSTableExport.export(reader, out, new String[0],
                             CFMetaData.sparseCFMetaData("Keyspace1", "UUIDKeys", BytesType.instance));
    }
    JSONArray json;
    try (FileReader in = new FileReader(tempJson))
    {
        json = (JSONArray) JSONValue.parseWithException(in);
    }
    assertEquals(1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    JSONArray cols = (JSONArray) row.get("cells");
    assertEquals(1, cols.size());
    // check column name and value
    JSONArray col = (JSONArray) cols.get(0);
    assertEquals(CFMetaData.DEFAULT_KEY_ALIAS, ByteBufferUtil.string(hexToBytes((String) col.get(0))));
    assertEquals("not a uuid", ByteBufferUtil.string(hexToBytes((String) col.get(1))));
}
示例7: testColumnNameEqualToDefaultKeyAlias
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Tests CASSANDRA-6892 (key aliases being used improperly for validation):
 * a column literally named like the default key alias must not be validated
 * with the key validator. Writes one row whose key is a time-UUID and whose
 * single cell is named {@code CFMetaData.DEFAULT_KEY_ALIAS} with a
 * non-UUID value, then verifies the export succeeds and round-trips.
 */
@Test
public void testColumnNameEqualToDefaultKeyAlias() throws IOException, ParseException
{
    File tempSS = tempSSTableFile(KEYSPACE1, "UUIDKeys");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create(KEYSPACE1, "UUIDKeys");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add a row
    cfamily.addColumn(column(CFMetaData.DEFAULT_KEY_ALIAS, "not a uuid", 1L));
    writer.append(Util.dk(ByteBufferUtil.bytes(UUIDGen.getTimeUUID())), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify; close the streams so the temp file is flushed
    // and no descriptors leak if an assertion fires.
    File tempJson = File.createTempFile("CFWithColumnNameEqualToDefaultKeyAlias", ".json");
    try (PrintStream out = new PrintStream(tempJson.getPath()))
    {
        SSTableExport.export(reader, out, new String[0],
                             CFMetaData.sparseCFMetaData(KEYSPACE1, "UUIDKeys", BytesType.instance));
    }
    JSONArray json;
    try (FileReader in = new FileReader(tempJson))
    {
        json = (JSONArray) JSONValue.parseWithException(in);
    }
    assertEquals(1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    JSONArray cols = (JSONArray) row.get("cells");
    assertEquals(1, cols.size());
    // check column name and value
    JSONArray col = (JSONArray) cols.get(0);
    assertEquals(CFMetaData.DEFAULT_KEY_ALIAS, ByteBufferUtil.string(hexToBytes((String) col.get(0))));
    assertEquals("not a uuid", ByteBufferUtil.string(hexToBytes((String) col.get(1))));
}
示例8: writeSortedContents
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Flushes this memtable's rows to a new temporary sstable in {@code sstableDirectory}.
 *
 * @param context          the commitlog replay position this flush covers (logged only)
 * @param sstableDirectory directory in which the temp sstable is created
 * @return a reader over the flushed sstable, or {@code null} if nothing needed
 *         to be written (the writer is aborted and its temp files cleaned up)
 */
private SSTableReader writeSortedContents(ReplayPosition context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.info("Writing {}", Memtable.this.toString());
    SSTableReader ssTable;
    // Create the writer outside the try block: a failure here leaves only
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // Only count contended partitions when debug logging is on, since the
        // count is used solely for the debug message at the end.
        boolean trackContention = logger.isDebugEnabled();
        int heavilyContendedRowCount = 0;
        // (we can't clear out the map as-we-go to free up memory,
        // since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
        {
            AtomicBTreeColumns cf = entry.getValue();
            if (cf.isMarkedForDelete() && cf.hasColumns())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS))
                    continue;
            }
            if (trackContention && cf.usePessimisticLocking())
                heavilyContendedRowCount++;
            if (!cf.isEmpty())
                writer.append((DecoratedKey)entry.getKey(), cf);
        }
        // A non-zero file pointer means at least one partition was appended.
        if (writer.getFilePointer() > 0)
        {
            writer.isolateReferences();
            // temp sstables should contain non-repaired data.
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context));
        }
        else
        {
            // Nothing was written: abort so the empty temp files are removed.
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained. Commitlog position was {}",
                        context);
        }
        if (heavilyContendedRowCount > 0)
            logger.debug(String.format("High update contention in %d/%d partitions of %s ", heavilyContendedRowCount, rows.size(), Memtable.this.toString()));
        return ssTable;
    }
    catch (Throwable e)
    {
        // On any failure, abort the writer to clean up partial temp files,
        // then rethrow (wrapped as unchecked if necessary).
        writer.abort();
        throw Throwables.propagate(e);
    }
}
示例9: importSorted
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Imports a pre-sorted JSON dump into a new sstable at {@code ssTablePath}.
 * Keys must arrive in strictly increasing partitioner order.
 *
 * @return the number of keys imported, or -1 if an out-of-order key was found
 *         (in which case partially written sstable files may need manual cleanup)
 */
private int importSorted(String jsonFile, ColumnFamily columnFamily, String ssTablePath,
                         IPartitioner partitioner) throws IOException
{
    int importedKeys = 0; // already imported keys count
    long start = System.nanoTime();
    JsonParser parser = getParser(jsonFile);
    // If the caller did not cap the import (-n), do a first pass over the
    // JSON purely to count the top-level entries.
    if (keyCountToImport == null)
    {
        keyCountToImport = 0;
        System.out.println("Counting keys to import, please wait... (NOTE: to skip this use -n <num_keys>)");
        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            parser.skipChildren();
            if (parser.getCurrentToken() == JsonToken.END_ARRAY)
                break;
            keyCountToImport++;
        }
    }
    System.out.printf("Importing %s keys...%n", keyCountToImport);
    parser = getParser(jsonFile); // renewing parser
    SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE);
    int lineNumber = 1;
    DecoratedKey prevStoredKey = null;
    parser.nextToken(); // START_ARRAY
    while (parser.nextToken() != null)
    {
        String key = parser.getCurrentName();
        Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
        DecoratedKey currentKey = partitioner.decorateKey(getKeyValidator(columnFamily).fromString((String) row.get("key")));
        if (row.containsKey("metadata"))
            parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);
        addColumnsToCF((List<?>) row.get("cells"), columnFamily);
        // Comparable.compareTo only guarantees the SIGN of the result, not the
        // value -1, so test ">= 0" rather than "!= -1" to detect any key that
        // is not strictly greater than its predecessor.
        if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
        {
            System.err
                  .printf("Line %d: Key %s is greater than previous, collection is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                          lineNumber, key);
            return -1;
        }
        // saving decorated key
        writer.append(currentKey, columnFamily);
        columnFamily.clear();
        prevStoredKey = currentKey;
        importedKeys++;
        lineNumber++;
        // Print a progress message roughly every 5 seconds.
        long current = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
        {
            System.out.printf("Currently imported %d keys.%n", importedKeys);
            start = current;
        }
        if (keyCountToImport == importedKeys)
            break;
    }
    writer.closeAndOpenReader();
    return importedKeys;
}
示例10: testExportColumnsWithMetadata
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
@Test
public void testExportColumnsWithMetadata() throws IOException, ParseException
{
    // Writes one row with two cells plus a row-level deletion, then verifies
    // the JSON export contains the key, the deletionInfo metadata, and both cells.
    File tempSS = tempSSTableFile("Keyspace1", "Standard1");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add rowA
    cfamily.addColumn(Util.cellname("colName"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
    cfamily.addColumn(Util.cellname("colName1"), ByteBufferUtil.bytes("val1"), System.currentTimeMillis());
    cfamily.delete(new DeletionInfo(0, 0));
    writer.append(Util.dk("rowA"), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify; close the streams so the temp file is flushed
    // and no descriptors leak if an assertion fires.
    File tempJson = File.createTempFile("CFWithDeletionInfo", ".json");
    try (PrintStream out = new PrintStream(tempJson.getPath()))
    {
        // NOTE(review): metadata is built for "Counter1" although the data was
        // written to "Standard1" — looks like a copy/paste slip; confirm intent.
        SSTableExport.export(reader, out, new String[0],
                             CFMetaData.sparseCFMetaData("Keyspace1", "Counter1", BytesType.instance));
    }
    JSONArray json;
    try (FileReader in = new FileReader(tempJson))
    {
        json = (JSONArray) JSONValue.parseWithException(in);
    }
    assertEquals("unexpected number of rows", 1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    assertEquals("unexpected number of keys", 3, row.keySet().size());
    assertEquals("unexpected row key", asHex("rowA"), row.get("key"));
    // check that the row key is there and present
    String rowKey = (String) row.get("key");
    assertNotNull("expecing key to be present", rowKey);
    assertEquals("key did not match", ByteBufferUtil.bytes("rowA"), hexToBytes(rowKey));
    // check that there is metadata and that it contains deletionInfo
    JSONObject meta = (JSONObject) row.get("metadata");
    assertNotNull("expecing metadata to be present", meta);
    assertEquals("unexpected number of metadata entries", 1, meta.keySet().size());
    JSONObject serializedDeletionInfo = (JSONObject) meta.get("deletionInfo");
    assertNotNull("expecing deletionInfo to be present", serializedDeletionInfo);
    assertEquals(
        "unexpected serialization format for topLevelDeletion",
        "{\"markedForDeleteAt\":0,\"localDeletionTime\":0}",
        serializedDeletionInfo.toJSONString());
    // check the colums are what we put in
    JSONArray cols = (JSONArray) row.get("cells");
    assertNotNull("expecing columns to be present", cols);
    assertEquals("expecting two columns", 2, cols.size());
    JSONArray col1 = (JSONArray) cols.get(0);
    assertEquals("column name did not match", ByteBufferUtil.bytes("colName"), hexToBytes((String) col1.get(0)));
    assertEquals("column value did not match", ByteBufferUtil.bytes("val"), hexToBytes((String) col1.get(1)));
    JSONArray col2 = (JSONArray) cols.get(1);
    assertEquals("column name did not match", ByteBufferUtil.bytes("colName1"), hexToBytes((String) col2.get(0)));
    assertEquals("column value did not match", ByteBufferUtil.bytes("val1"), hexToBytes((String) col2.get(1)));
}
示例11: writeSortedContents
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Flushes this memtable's rows to a new temporary sstable in {@code sstableDirectory}.
 *
 * @param context          future for the commitlog replay position this flush covers
 *                         (resolved via get() only for the log messages)
 * @param sstableDirectory directory in which the temp sstable is created
 * @return a reader over the flushed sstable, or {@code null} if nothing needed
 *         to be written (the writer is aborted and its temp files cleaned up)
 */
private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
throws ExecutionException, InterruptedException
{
    logger.debug("Writing {}", Memtable.this.toString());
    SSTableReader ssTable;
    // Create the writer outside the try block: a failure here leaves only
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        // since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicSortedColumns> entry : rows.entrySet())
        {
            ColumnFamily cf = entry.getValue();
            if (cf.isMarkedForDelete())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS) && !(cf.getColumnCount() == 0))
                    continue;
                // Pedantically, you could purge column level tombstones that are past GcGRace when writing to the SSTable.
                // But it can result in unexpected behaviour where deletes never make it to disk,
                // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                // is a CF level tombstone to ensure the delete makes it into an SSTable.
                // We also shouldn't be dropping any columns obsoleted by partition and/or range tombstones in case
                // the table has secondary indexes, or else the stale entries wouldn't be cleaned up during compaction,
                // and will only be dropped during 2i query read-repair, if at all.
                if (!cfs.indexManager.hasIndexes())
                    ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
            }
            writer.append((DecoratedKey)entry.getKey(), cf);
        }
        // A non-zero file pointer means at least one partition was appended.
        if (writer.getFilePointer() > 0)
        {
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        }
        else
        {
            // Nothing was written: abort so the empty temp files are removed.
            writer.abort();
            ssTable = null;
            logger.debug("Completed flushing; nothing needed to be retained. Commitlog position was {}",
                         context.get());
        }
        return ssTable;
    }
    catch (Throwable e)
    {
        // On any failure, abort the writer to clean up partial temp files,
        // then rethrow (wrapped as unchecked if necessary).
        writer.abort();
        throw Throwables.propagate(e);
    }
}
示例12: importSorted
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Imports a pre-sorted JSON dump into a new sstable at {@code ssTablePath},
 * handling both standard and (when {@code oldSCFormat} is set) super column
 * family layouts. Keys must arrive in strictly increasing partitioner order.
 *
 * @return the number of keys imported, or -1 if an out-of-order key was found
 *         (in which case partially written sstable files may need manual cleanup)
 */
private int importSorted(String jsonFile, ColumnFamily columnFamily, String ssTablePath,
                         IPartitioner<?> partitioner) throws IOException
{
    int importedKeys = 0; // already imported keys count
    long start = System.nanoTime();
    JsonParser parser = getParser(jsonFile);
    // If the caller did not cap the import (-n), do a first pass over the
    // JSON purely to count the top-level entries.
    if (keyCountToImport == null)
    {
        keyCountToImport = 0;
        System.out.println("Counting keys to import, please wait... (NOTE: to skip this use -n <num_keys>)");
        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            parser.skipChildren();
            if (parser.getCurrentToken() == JsonToken.END_ARRAY)
                break;
            keyCountToImport++;
        }
    }
    System.out.printf("Importing %s keys...%n", keyCountToImport);
    parser = getParser(jsonFile); // renewing parser
    SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport);
    int lineNumber = 1;
    DecoratedKey prevStoredKey = null;
    parser.nextToken(); // START_ARRAY
    while (parser.nextToken() != null)
    {
        String key = parser.getCurrentName();
        Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
        DecoratedKey currentKey = partitioner.decorateKey(hexToBytes((String) row.get("key")));
        if (row.containsKey("metadata"))
            parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);
        if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
            addToSuperCF((Map<?, ?>)row.get("columns"), columnFamily);
        else
            addToStandardCF((List<?>)row.get("columns"), columnFamily);
        // Comparable.compareTo only guarantees the SIGN of the result, not the
        // value -1, so test ">= 0" rather than "!= -1" to detect any key that
        // is not strictly greater than its predecessor.
        if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
        {
            System.err
                  .printf("Line %d: Key %s is greater than previous, collection is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                          lineNumber, key);
            return -1;
        }
        // saving decorated key
        writer.append(currentKey, columnFamily);
        columnFamily.clear();
        prevStoredKey = currentKey;
        importedKeys++;
        lineNumber++;
        // Print a progress message roughly every 5 seconds.
        long current = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
        {
            System.out.printf("Currently imported %d keys.%n", importedKeys);
            start = current;
        }
        if (keyCountToImport == importedKeys)
            break;
    }
    writer.closeAndOpenReader();
    return importedKeys;
}
示例13: testExportColumnsWithMetadata
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
@Test
public void testExportColumnsWithMetadata() throws IOException, ParseException
{
    // Writes one row with two cells plus a row-level deletion, then verifies
    // the JSON export contains the key, the deletionInfo metadata, and both columns.
    File tempSS = tempSSTableFile("Keyspace1", "Standard1");
    ColumnFamily cfamily = TreeMapBackedSortedColumns.factory.create("Keyspace1", "Standard1");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2);
    // Add rowA
    cfamily.addColumn(ByteBufferUtil.bytes("colName"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
    cfamily.addColumn(ByteBufferUtil.bytes("colName1"), ByteBufferUtil.bytes("val1"), System.currentTimeMillis());
    cfamily.delete(new DeletionInfo(0, 0));
    writer.append(Util.dk("rowA"), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify; close the streams explicitly so the temp file
    // is flushed and no descriptors leak if an assertion fires.
    File tempJson = File.createTempFile("CFWithDeletionInfo", ".json");
    PrintStream out = new PrintStream(tempJson.getPath());
    try
    {
        SSTableExport.export(reader, out, new String[0]);
    }
    finally
    {
        out.close();
    }
    JSONArray json;
    FileReader in = new FileReader(tempJson);
    try
    {
        json = (JSONArray) JSONValue.parseWithException(in);
    }
    finally
    {
        in.close();
    }
    System.out.println(json.toJSONString());
    assertEquals("unexpected number of rows", 1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    assertEquals("unexpected number of keys", 3, row.keySet().size());
    assertEquals("unexpected row key", asHex("rowA"), row.get("key"));
    // check that the row key is there and present
    String rowKey = (String) row.get("key");
    assertNotNull("expecing key to be present", rowKey);
    assertEquals("key did not match", ByteBufferUtil.bytes("rowA"), hexToBytes(rowKey));
    // check that there is metadata and that it contains deletionInfo
    JSONObject meta = (JSONObject) row.get("metadata");
    assertNotNull("expecing metadata to be present", meta);
    assertEquals("unexpected number of metadata entries", 1, meta.keySet().size());
    JSONObject serializedDeletionInfo = (JSONObject) meta.get("deletionInfo");
    assertNotNull("expecing deletionInfo to be present", serializedDeletionInfo);
    assertEquals(
        "unexpected serialization format for topLevelDeletion",
        "{\"markedForDeleteAt\":0,\"localDeletionTime\":0}",
        serializedDeletionInfo.toJSONString());
    // check the colums are what we put in
    JSONArray cols = (JSONArray) row.get("columns");
    assertNotNull("expecing columns to be present", cols);
    assertEquals("expecting two columns", 2, cols.size());
    JSONArray col1 = (JSONArray) cols.get(0);
    assertEquals("column name did not match", ByteBufferUtil.bytes("colName"), hexToBytes((String) col1.get(0)));
    assertEquals("column value did not match", ByteBufferUtil.bytes("val"), hexToBytes((String) col1.get(1)));
    JSONArray col2 = (JSONArray) cols.get(1);
    assertEquals("column name did not match", ByteBufferUtil.bytes("colName1"), hexToBytes((String) col2.get(0)));
    assertEquals("column value did not match", ByteBufferUtil.bytes("val1"), hexToBytes((String) col2.get(1)));
}
示例14: writeSortedContents
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Flushes this memtable's rows to a new temporary sstable in {@code sstableDirectory}.
 *
 * @param context          the commitlog replay position this flush covers (logged only)
 * @param sstableDirectory directory in which the temp sstable is created
 * @return a reader over the flushed sstable, or {@code null} if nothing needed
 *         to be written (the writer is aborted and its temp files cleaned up)
 */
private SSTableReader writeSortedContents(ReplayPosition context, File sstableDirectory)
{
    logger.info("Writing {}", Memtable.this.toString());
    SSTableReader ssTable;
    // Create the writer outside the try block: a failure here leaves only
    // errors when creating the writer that may leave empty temp files.
    SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
    try
    {
        // (we can't clear out the map as-we-go to free up memory,
        // since the memtable is being used for queries in the "pending flush" category)
        for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
        {
            ColumnFamily cf = entry.getValue();
            if (cf.isMarkedForDelete() && cf.hasColumns())
            {
                // When every node is up, there's no reason to write batchlog data out to sstables
                // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                // See CASSANDRA-4667.
                if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS))
                    continue;
            }
            if (!cf.isEmpty())
                writer.append((DecoratedKey)entry.getKey(), cf);
        }
        // A non-zero file pointer means at least one partition was appended.
        if (writer.getFilePointer() > 0)
        {
            writer.isolateReferences();
            // temp sstables should contain non-repaired data.
            ssTable = writer.closeAndOpenReader();
            logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                      ssTable.getFilename(), new File(ssTable.getFilename()).length(), context));
        }
        else
        {
            // Nothing was written: abort so the empty temp files are removed.
            writer.abort();
            ssTable = null;
            logger.info("Completed flushing; nothing needed to be retained. Commitlog position was {}",
                        context);
        }
        return ssTable;
    }
    catch (Throwable e)
    {
        // On any failure, abort the writer to clean up partial temp files,
        // then rethrow (wrapped as unchecked if necessary).
        writer.abort();
        throw Throwables.propagate(e);
    }
}
示例15: importSorted
import org.apache.cassandra.io.sstable.SSTableWriter; //导入方法依赖的package包/类
/**
 * Imports a pre-sorted JSON dump into a new sstable at {@code ssTablePath}.
 * Keys must arrive in strictly increasing partitioner order.
 *
 * @return the number of keys imported, or -1 if an out-of-order key was found
 *         (in which case partially written sstable files may need manual cleanup)
 */
private int importSorted(String jsonFile, ColumnFamily columnFamily, String ssTablePath,
                         IPartitioner<?> partitioner) throws IOException
{
    int importedKeys = 0; // already imported keys count
    long start = System.nanoTime();
    JsonParser parser = getParser(jsonFile);
    // If the caller did not cap the import (-n), do a first pass over the
    // JSON purely to count the top-level entries.
    if (keyCountToImport == null)
    {
        keyCountToImport = 0;
        System.out.println("Counting keys to import, please wait... (NOTE: to skip this use -n <num_keys>)");
        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            parser.skipChildren();
            if (parser.getCurrentToken() == JsonToken.END_ARRAY)
                break;
            keyCountToImport++;
        }
    }
    System.out.printf("Importing %s keys...%n", keyCountToImport);
    parser = getParser(jsonFile); // renewing parser
    SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE);
    int lineNumber = 1;
    DecoratedKey prevStoredKey = null;
    parser.nextToken(); // START_ARRAY
    while (parser.nextToken() != null)
    {
        String key = parser.getCurrentName();
        Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
        DecoratedKey currentKey = partitioner.decorateKey(getKeyValidator(columnFamily).fromString((String) row.get("key")));
        if (row.containsKey("metadata"))
            parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);
        addColumnsToCF((List<?>) row.get("cells"), columnFamily);
        // Comparable.compareTo only guarantees the SIGN of the result, not the
        // value -1, so test ">= 0" rather than "!= -1" to detect any key that
        // is not strictly greater than its predecessor.
        if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
        {
            System.err
                  .printf("Line %d: Key %s is greater than previous, collection is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                          lineNumber, key);
            return -1;
        }
        // saving decorated key
        writer.append(currentKey, columnFamily);
        columnFamily.clear();
        prevStoredKey = currentKey;
        importedKeys++;
        lineNumber++;
        // Print a progress message roughly every 5 seconds.
        long current = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
        {
            System.out.printf("Currently imported %d keys.%n", importedKeys);
            start = current;
        }
        if (keyCountToImport == importedKeys)
            break;
    }
    writer.closeAndOpenReader();
    return importedKeys;
}