This article collects typical usage examples of the dexie.ignoreTransaction function in TypeScript. If you have been wondering what exactly ignoreTransaction does and how to use it, the curated code examples below may help.
Three code examples of the ignoreTransaction function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better TypeScript examples.
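Before the examples, here is a minimal sketch of what Dexie.ignoreTransaction is for: it runs a callback outside the zone of the currently ongoing transaction, so side work such as logging or progress reporting neither joins nor keeps that transaction alive. The database and table names below are made up purely for illustration.
import Dexie from 'dexie';
// Hypothetical database, used only for illustration.
const db = new Dexie('demo-db');
db.version(1).stores({ items: '++id, name' });
async function relabelItems(): Promise<void> {
  await db.transaction('rw', db.table('items'), async () => {
    const all = await db.table('items').toArray();
    // Runs outside the zone of the ongoing 'rw' transaction, so the callback
    // does not participate in it (the examples below wrap their progressCallback
    // the same way).
    Dexie.ignoreTransaction(() => {
      console.log(`read ${all.length} items`);
    });
    await db.table('items').bulkPut(all);
  });
}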
Example 1: importInto
export async function importInto(db: Dexie, exportedData: Blob | JsonStream<DexieExportJsonStructure>, options?: ImportOptions): Promise<void> {
options = options || {}; // All boolean options default to false.
const CHUNK_SIZE = options!.chunkSizeBytes || (DEFAULT_KILOBYTES_PER_CHUNK * 1024);
const jsonStream = await loadUntilWeGotEnoughData(exportedData, CHUNK_SIZE);
let dbExportFile = jsonStream.result;
const readBlobsSynchronously = 'FileReaderSync' in self; // true in workers only.
const dbExport = dbExportFile.data!;
if (!options!.acceptNameDiff && db.name !== dbExport.databaseName)
throw new Error(`Name differs. Current database name is ${db.name} but export is ${dbExport.databaseName}`);
if (!options!.acceptVersionDiff && db.verno !== dbExport.databaseVersion) {
// Possible feature: Call upgraders in some isolated way if this happens... ?
throw new Error(`Database version differs. Current database is in version ${db.verno} but export is ${dbExport.databaseVersion}`);
}
const { progressCallback } = options;
const progress: ImportProgress = {
done: false,
completedRows: 0,
completedTables: 0,
totalRows: dbExport.tables.reduce((p, c) => p + c.rowCount, 0),
totalTables: dbExport.tables.length
};
if (progressCallback) {
// Keep ongoing transaction private
Dexie.ignoreTransaction(()=>progressCallback(progress));
}
if (options.noTransaction) {
await importAll();
} else {
await db.transaction('rw', db.tables, importAll);
}
async function importAll () {
do {
for (const tableExport of dbExport.data) {
if (!tableExport.rows) break; // Need to pull more!
if ((tableExport.rows as any).complete && tableExport.rows.length === 0)
continue;
if (progressCallback) {
// Keep ongoing transaction private
Dexie.ignoreTransaction(()=>progressCallback(progress));
}
const tableName = tableExport.tableName;
const table = db.table(tableName);
const tableSchemaStr = dbExport.tables.filter(t => t.name === tableName)[0].schema;
if (!table) {
if (!options!.acceptMissingTables)
throw new Error(`Exported table ${tableExport.tableName} is missing in installed database`);
else
continue;
}
if (!options!.acceptChangedPrimaryKey &&
tableSchemaStr.split(',')[0] != table.schema.primKey.src) {
throw new Error(`Primary key differs for table ${tableExport.tableName}. `);
}
const rows = tableExport.rows.map(row => TSON.revive(row));
const filter = options!.filter;
const filteredRows = filter ?
tableExport.inbound ?
rows.filter(value => filter(tableName, value)) :
rows.filter(([key, value]) => filter(tableName, value, key)) :
rows;
const [keys, values] = tableExport.inbound ?
[undefined, filteredRows] :
// Keys and values must both come from the filtered array so they stay aligned.
[filteredRows.map(row=>row[0]), filteredRows.map(row=>row[1])];
if (options!.clearTablesBeforeImport) {
await table.clear();
}
if (options!.overwriteValues)
await table.bulkPut(values, keys);
else
await table.bulkAdd(values, keys);
progress.completedRows += rows.length;
if ((rows as any).complete) {
progress.completedTables += 1;
}
rows.splice(0, rows.length); // Free up RAM, keep existing array instance.
}
// Avoid unnecessary loops in "for (const tableExport of dbExport.data)"
while (dbExport.data.length > 0 && dbExport.data[0].rows && (dbExport.data[0].rows as any).complete) {
// We've already imported all rows from the first table. Delete its occurrence
dbExport.data.splice(0, 1);
}
if (!jsonStream.done() && !jsonStream.eof()) {
// Pull some more (keeping transaction alive)
if (readBlobsSynchronously) {
// If we can pull from the blob synchronously, we don't have to
// keep the transaction alive using Dexie.waitFor().
// This will only be possible in workers.
jsonStream.pullSync(CHUNK_SIZE);
} else {
await Dexie.waitFor(jsonStream.pullAsync(CHUNK_SIZE));
}
//.........the rest of the code is omitted here.........
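The importInto function above appears to come from the dexie-export-import addon; the source does not name the package, so treat that as an assumption. Under that assumption, a caller might look roughly like the following sketch, where the database name, store names and option values are placeholders:
import Dexie from 'dexie';
import { importInto } from 'dexie-export-import';
// Placeholder target database.
const db = new Dexie('target-db');
db.version(1).stores({ friends: '++id, name' });
async function restore(backup: Blob): Promise<void> {
  await importInto(db, backup, {
    overwriteValues: true,           // use bulkPut instead of bulkAdd (see above)
    clearTablesBeforeImport: false,
    progressCallback: ({ completedRows, totalRows }) => {
      // Invoked through Dexie.ignoreTransaction(), so it does not join the import transaction.
      console.log(`imported ${completedRows}/${totalRows} rows`);
      return true;
    },
  });
}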
Example 2: importAll
async function importAll () {
do {
for (const tableExport of dbExport.data) {
if (!tableExport.rows) break; // Need to pull more!
if ((tableExport.rows as any).complete && tableExport.rows.length === 0)
continue;
if (progressCallback) {
// Keep ongoing transaction private
Dexie.ignoreTransaction(()=>progressCallback(progress));
}
const tableName = tableExport.tableName;
const table = db.table(tableName);
const tableSchemaStr = dbExport.tables.filter(t => t.name === tableName)[0].schema;
if (!table) {
if (!options!.acceptMissingTables)
throw new Error(`Exported table ${tableExport.tableName} is missing in installed database`);
else
continue;
}
if (!options!.acceptChangedPrimaryKey &&
tableSchemaStr.split(',')[0] != table.schema.primKey.src) {
throw new Error(`Primary key differs for table ${tableExport.tableName}. `);
}
const rows = tableExport.rows.map(row => TSON.revive(row));
const filter = options!.filter;
const filteredRows = filter ?
tableExport.inbound ?
rows.filter(value => filter(tableName, value)) :
rows.filter(([key, value]) => filter(tableName, value, key)) :
rows;
const [keys, values] = tableExport.inbound ?
[undefined, filteredRows] :
// Keys and values must both come from the filtered array so they stay aligned.
[filteredRows.map(row=>row[0]), filteredRows.map(row=>row[1])];
if (options!.clearTablesBeforeImport) {
await table.clear();
}
if (options!.overwriteValues)
await table.bulkPut(values, keys);
else
await table.bulkAdd(values, keys);
progress.completedRows += rows.length;
if ((rows as any).complete) {
progress.completedTables += 1;
}
rows.splice(0, rows.length); // Free up RAM, keep existing array instance.
}
// Avoid unnecessary loops in "for (const tableExport of dbExport.data)"
while (dbExport.data.length > 0 && dbExport.data[0].rows && (dbExport.data[0].rows as any).complete) {
// We've already imported all rows from the first table. Delete its occurrence
dbExport.data.splice(0, 1);
}
if (!jsonStream.done() && !jsonStream.eof()) {
// Pull some more (keeping transaction alive)
if (readBlobsSynchronously) {
// If we can pull from the blob synchronously, we don't have to
// keep the transaction alive using Dexie.waitFor().
// This will only be possible in workers.
jsonStream.pullSync(CHUNK_SIZE);
} else {
await Dexie.waitFor(jsonStream.pullAsync(CHUNK_SIZE));
}
}
} while (!jsonStream.done() && !jsonStream.eof());
}
Example 3: exportAll
async function exportAll() {
// Count rows:
const tablesRowCounts = await Promise.all(db.tables.map(table => table.count()));
tablesRowCounts.forEach((rowCount, i) => tables[i].rowCount = rowCount);
progress.totalRows = tablesRowCounts.reduce((p,c)=>p+c);
// Write first JSON slice
const emptyExportJson = JSON.stringify(emptyExport, undefined, prettyJson ? 2 : undefined);
const posEndDataArray = emptyExportJson.lastIndexOf(']');
const firstJsonSlice = emptyExportJson.substring(0, posEndDataArray);
slices.push(firstJsonSlice);
const filter = options!.filter;
for (const {name: tableName} of tables) {
const table = db.table(tableName);
const {primKey} = table.schema;
const inbound = !!primKey.keyPath;
const LIMIT = options!.numRowsPerChunk || DEFAULT_ROWS_PER_CHUNK;
const emptyTableExport: DexieExportedTable = inbound ? {
tableName: table.name,
inbound: true,
rows: []
} : {
tableName: table.name,
inbound: false,
rows: []
};
let emptyTableExportJson = JSON.stringify(emptyTableExport, undefined, prettyJson ? 2 : undefined);
if (prettyJson) {
// Increase indentation according to this:
// {
// ...
// data: [
// ...
// data: [
// 123456<---- here
// ]
// ]
// }
emptyTableExportJson = emptyTableExportJson.split('\n').join('\n ');
}
const posEndRowsArray = emptyTableExportJson.lastIndexOf(']');
slices.push(emptyTableExportJson.substring(0, posEndRowsArray));
let lastKey: any = null;
let mayHaveMoreRows = true;
while (mayHaveMoreRows) {
if (progressCallback) {
// Keep ongoing transaction private
Dexie.ignoreTransaction(()=>progressCallback(progress));
}
const chunkedCollection = lastKey == null ?
table.limit(LIMIT) :
table.where(':id').above(lastKey).limit(LIMIT);
const values = await chunkedCollection.toArray();
if (values.length === 0) break;
if (lastKey != null) {
// Not initial chunk. Must add a comma:
slices.push(",");
if (prettyJson) {
slices.push("\n ");
}
}
mayHaveMoreRows = values.length === LIMIT;
if (inbound) {
const filteredValues = filter ?
values.filter(value => filter(tableName, value)) :
values;
const tsonValues = filteredValues.map(value => TSON.encapsulate(value));
if (TSON.mustFinalize()) {
await Dexie.waitFor(TSON.finalize(tsonValues));
}
let json = JSON.stringify(tsonValues, undefined, prettyJson ? 2 : undefined);
if (prettyJson) json = json.split('\n').join('\n ');
// By generating a Blob here, we give the web platform the opportunity to store the contents
// on disk and release RAM.
slices.push(new Blob([json.substring(1, json.length - 1)]));
lastKey = values.length > 0 ?
Dexie.getByKeyPath(values[values.length -1], primKey.keyPath as string) :
null;
} else {
const keys = await chunkedCollection.primaryKeys();
let keyvals = keys.map((key, i) => [key, values[i]]);
if (filter) keyvals = keyvals.filter(([key, value]) => filter(tableName, value, key));
const tsonTuples = keyvals.map(tuple => TSON.encapsulate(tuple));
if (TSON.mustFinalize()) {
await Dexie.waitFor(TSON.finalize(tsonTuples));
}
let json = JSON.stringify(tsonTuples, undefined, prettyJson ? 2 : undefined);
if (prettyJson) json = json.split('\n').join('\n ');
//.........the rest of the code is omitted here.........
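exportAll above is an inner helper of the export routine. Assuming it belongs to dexie-export-import's exportDB (again an assumption, since the source does not say), exporting a database could be sketched as follows, with placeholder names and option values:
import Dexie from 'dexie';
import { exportDB } from 'dexie-export-import';
// Placeholder source database.
const db = new Dexie('source-db');
db.version(1).stores({ friends: '++id, name' });
async function backup(): Promise<Blob> {
  // Resolves to a Blob assembled from JSON slices like those produced above.
  return exportDB(db, {
    prettyJson: true,           // indent the JSON output
    numRowsPerChunk: 1000,      // corresponds to LIMIT in exportAll
    progressCallback: (progress) => {
      // Called via Dexie.ignoreTransaction(), outside the export transaction.
      console.log(`exported ${progress.completedRows} rows`);
      return true;
    },
  });
}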