This article collects typical usage examples of the Java class schemacrawler.utility.SchemaCrawlerUtility. If you are unsure what SchemaCrawlerUtility is for or how to use it, the curated code examples below should help.
The SchemaCrawlerUtility class belongs to the schemacrawler.utility package. Five code examples of the class are shown below, ordered by popularity.
Example 1: importSchemas
import schemacrawler.utility.SchemaCrawlerUtility; // import the required package/class
public static List<String> importSchemas(Datasource datasource) throws ServiceException {
    List<String> schemaNames = new ArrayList<>();
    try (Connection connection = ConnUtils.getSchemaCrawlerConnection(datasource)) {
        final SchemaCrawlerOptions options = getSchemaCrawlerOptions(datasource);
        options.getSchemaInfoLevel().setRetrieveDatabaseInfo(false);
        options.getSchemaInfoLevel().setRetrieveTables(false);
        Catalog database = SchemaCrawlerUtility.getCatalog(connection, options);
        // MySQL only has catalog name, no schema name
        if (datasource.getDatabaseType() == DBType.MYSQL) {
            schemaNames.addAll(database.getSchemas().stream()
                    .map(Schema::getCatalogName)
                    .collect(Collectors.toList()));
        } else {
            schemaNames.addAll(database.getSchemas().stream()
                    .map(Schema::getName)
                    .collect(Collectors.toList()));
        }
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
        throw new ServiceException(e);
    }
    return schemaNames;
}
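Note that Datasource, ConnUtils, DBType, and getSchemaCrawlerOptions are project-specific and not shown in this example. Purely as a hypothetical sketch of what such an options helper might look like (the project's real helper may differ):

// Hypothetical sketch only - the project's actual getSchemaCrawlerOptions is not shown above.
private static SchemaCrawlerOptions getSchemaCrawlerOptions(Datasource datasource) {
    final SchemaCrawlerOptions options = new SchemaCrawlerOptions();
    // Start from the standard info level; the caller then switches off the parts it does not need
    options.setSchemaInfoLevel(SchemaInfoLevelBuilder.standard());
    return options;
}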
Example 2: crawlDatabase
import schemacrawler.utility.SchemaCrawlerUtility; // import the required package/class
/**
 * Starts the schema crawler and lets it crawl the given JDBC connection.
 *
 * @param connection The JDBC connection
 * @param schemaRule The {@link InclusionRule} to be passed to SchemaCrawler that specifies which schemas should be analyzed
 * @param tableRule The {@link InclusionRule} to be passed to SchemaCrawler that specifies which tables should be analyzed. If a table is included by the
 *                  {@code tableRule} but excluded by the {@code schemaRule} it will not be analyzed.
 * @return The populated {@link Catalog} object containing the metadata for the extractor
 * @throws SchemaCrawlerException Gets thrown when the database could not be crawled successfully
 */
public static Catalog crawlDatabase(final Connection connection, final InclusionRule schemaRule, final InclusionRule tableRule) throws SchemaCrawlerException {
    final SchemaCrawlerOptions options = new SchemaCrawlerOptions();
    final SchemaInfoLevel level = SchemaInfoLevelBuilder.standard();
    level.setRetrieveIndexes(false);
    options.setSchemaInfoLevel(level);
    options.setRoutineTypes(Arrays.asList(RoutineType.procedure, RoutineType.unknown)); // RoutineType.function not supported by h2
    options.setSchemaInclusionRule(schemaRule == null ? new IncludeAll() : schemaRule);
    options.setTableInclusionRule(tableRule == null ? new IncludeAll() : tableRule);
    try {
        return SchemaCrawlerUtility.getCatalog(connection, options);
    } catch (SchemaCrawlerException e) {
        LOG.error("Schema crawling failed with exception", e);
        throw e;
    }
}
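A minimal usage sketch for the method above, assuming an H2 in-memory database with the default PUBLIC schema; the JDBC URL and the inclusion pattern are illustrative only:

// Usage sketch, assuming an H2 in-memory database; adjust the JDBC URL to your own setup.
public static void main(String[] args) throws Exception {
    try (Connection connection = DriverManager.getConnection("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1", "sa", "")) {
        // The regular expression is matched against the full schema name;
        // a null table rule falls back to IncludeAll inside crawlDatabase
        Catalog catalog = crawlDatabase(connection, new RegularExpressionInclusionRule(".*PUBLIC"), null);
        for (Schema schema : catalog.getSchemas()) {
            System.out.println(schema.getFullName() + ": " + catalog.getTables(schema).size() + " tables");
        }
    }
}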
Example 3: generateSpecification
import schemacrawler.utility.SchemaCrawlerUtility; // import the required package/class
public SpecRegistry generateSpecification(DataSource dataSource) throws Exception {
    SchemaCrawlerOptions options = new SchemaCrawlerOptions();
    // Set what details are required in the schema - this affects the
    // time taken to crawl the schema
    options.setSchemaInfoLevel(SchemaInfoLevelBuilder.detailed());
    Catalog catalog = SchemaCrawlerUtility.getCatalog(dataSource.getConnection(), options);
    firstPass(catalog);
    secondPass(catalog);
    postProcess(catalog);
    return registry;
}
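Besides detailed(), SchemaInfoLevelBuilder also provides minimum(), standard(), and maximum() presets; the more detail requested, the longer the crawl takes, so pick the lowest level that still retrieves what you need.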
Example 4: main
import schemacrawler.utility.SchemaCrawlerUtility; // import the required package/class
public static void main(String[] args) throws SQLException, SchemaCrawlerException, ClassNotFoundException {
    Driver driver = DriverManager.getDriver("jdbc:derby:memory:test;create=true");
    Connection connection = DriverManager.getConnection("jdbc:derby:memory:test;create=true", new Properties());
    Statement statement = connection.createStatement();
    // id INT NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 0, INCREMENT BY 1)
    statement.execute("CREATE TABLE USERS (id INT NOT NULL, name varchar(20), constraint users_pk_id primary key(id))");
    statement.execute("CREATE TABLE FRIENDS (id1 INT, id2 INT, " +
            " constraint fk_users_id1 foreign key(id1) references users(id)," +
            " constraint fk_users_id2 foreign key(id2) references users(id)" +
            ")");
    final SchemaCrawlerOptions options = new SchemaCrawlerOptions();
    options.setSchemaInfoLevel(SchemaInfoLevel.standard());
    final Catalog catalog = SchemaCrawlerUtility.getCatalog(connection, options);
    for (final Schema schema : catalog.getSchemas()) {
        System.out.println(schema);
        for (final Table table : catalog.getTables(schema)) {
            System.out.println("o--> " + table + " pk " + table.getPrimaryKey() + " fks " + table.getForeignKeys() + " type " + table.getTableType());
            for (final Column column : table.getColumns()) {
                System.out.println("  o--> " + column + " pk: " + column.isPartOfPrimaryKey() + " fk: " + column.isPartOfForeignKey());
            }
        }
    }
}
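Note that this example never closes the statement or the connection; outside of a throwaway main method, the JDBC resources would normally be wrapped in try-with-resources, as in Example 1.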
Example 5: extractTables
import schemacrawler.utility.SchemaCrawlerUtility; // import the required package/class
static TableInfo[] extractTables(Connection conn, final String schemaName, Rules rules) throws SchemaCrawlerException, SQLException {
    ArrayList<TableInfo> tableList = new ArrayList<TableInfo>(100);
    final SchemaCrawlerOptions options = new SchemaCrawlerOptions();
    options.setSchemaInfoLevel(SchemaInfoLevel.standard());
    System.out.println("my catalog = " + conn.getCatalog());
    // Only crawl the schema whose name matches exactly
    options.setSchemaInclusionRule(new InclusionRule() {
        @Override
        public boolean test(String anObject) {
            return schemaName.equals(anObject);
        }
    });
    final Catalog catalog = SchemaCrawlerUtility.getCatalog(conn, options);
    final Schema schema = catalog.getSchema(schemaName);
    System.out.println("Schema: " + schema);
    for (final Table table : catalog.getTables(schema)) {
        String tableName = table.getName();
        System.out.println(table + " pk " + table.getPrimaryKey() + " fks " + table.getForeignKeys() + " type " + table.getTableType());
        if (rules.skipTable(tableName) || table.getTableType().isView()) {
            System.out.println("SKIPPED");
            continue;
        }
        List<Column> columns = table.getColumns();
        List<String> fields = new ArrayList<>(columns.size());
        for (final Column column : columns) {
            // System.out.println("  o--> " + column + " pk: " + column.isPartOfPrimaryKey() + " fk: " + column.isPartOfForeignKey());
            String columnName = column.getName();
            if (column.isPartOfPrimaryKey() && rules.skipPrimaryKey(tableName, columnName)) {
                // skip, todo strategy
            } else if (column.isPartOfForeignKey()) {
                // skip, todo strategy
            } else {
                fields.add(columnName);
            }
        }
        Map<List<String>, String> fks = extractForeignKeys(table);
        tableList.add(TableInfo.add(tableName, extractPrimaryKeys(table, fks), fields, fks));
    }
    return tableList.toArray(new TableInfo[tableList.size()]);
}
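The extractForeignKeys and extractPrimaryKeys helpers referenced above are project-specific and not shown. Purely as an illustration (not the original helpers), foreign-key metadata can be read from a SchemaCrawler Table roughly like this, assuming the same pre-15 SchemaCrawler API used in these examples:

// Illustration only - not the project's extractForeignKeys helper.
static void printForeignKeys(final Table table) {
    for (final ForeignKey fk : table.getForeignKeys()) {
        for (final ForeignKeyColumnReference ref : fk.getColumnReferences()) {
            // Each column reference maps a foreign-key column to the primary-key column it points at
            System.out.println(fk.getName() + ": "
                    + ref.getForeignKeyColumn().getParent().getName() + "." + ref.getForeignKeyColumn().getName()
                    + " -> "
                    + ref.getPrimaryKeyColumn().getParent().getName() + "." + ref.getPrimaryKeyColumn().getName());
        }
    }
}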