This article collects typical usage examples of the Java method org.apache.metamodel.schema.Schema.getName. If you have been wondering what exactly Schema.getName does, how to call it, or where to find examples of it in practice, the hand-picked method code examples below may help. You can also read more about the containing class, org.apache.metamodel.schema.Schema.
Below, 15 code examples of the Schema.getName method are shown, sorted by popularity by default.
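Before diving into the examples, here is a minimal, self-contained sketch of what calling Schema.getName looks like. The data.csv file and the SchemaNameDemo class are hypothetical stand-ins; DataContextFactory.createCsvDataContext is standard MetaModel API, and any other data source type works the same way:

import java.io.File;

import org.apache.metamodel.DataContext;
import org.apache.metamodel.DataContextFactory;
import org.apache.metamodel.schema.Schema;

public class SchemaNameDemo {
    public static void main(String[] args) {
        // open a DataContext over a local CSV file (hypothetical path)
        final DataContext dataContext = DataContextFactory.createCsvDataContext(new File("data.csv"));
        for (Schema schema : dataContext.getSchemas()) {
            // getName() may return null for an unnamed default schema,
            // which several of the examples below guard against
            System.out.println("schema: " + schema.getName());
        }
    }
}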
Example 1: get
import org.apache.metamodel.schema.Schema; // the import required by this method
@RequestMapping(method = RequestMethod.GET)
@ResponseBody
public GetSchemaResponse get(@PathVariable("tenant") String tenantId,
        @PathVariable("dataContext") String dataSourceName, @PathVariable("schema") String schemaId) {
    final TenantContext tenantContext = tenantRegistry.getTenantContext(tenantId);
    final DataContext dataContext = tenantContext.getDataSourceRegistry().openDataContext(dataSourceName);
    final DataContextTraverser traverser = new DataContextTraverser(dataContext);
    final Schema schema = traverser.getSchema(schemaId);
    final String tenantName = tenantContext.getTenantName();
    final UriBuilder uriBuilder = UriBuilder.fromPath("/{tenant}/{dataContext}/s/{schema}/t/{table}");
    final String schemaName = schema.getName();
    final List<GetSchemaResponseTables> tableLinks = schema.getTableNames().stream().map(t -> {
        final String uri = uriBuilder.build(tenantName, dataSourceName, schemaName, t).toString();
        return new GetSchemaResponseTables().name(String.valueOf(t)).uri(uri);
    }).collect(Collectors.toList());
    final GetSchemaResponse resp = new GetSchemaResponse();
    resp.type("schema");
    resp.name(schemaName);
    resp.datasource(dataSourceName);
    resp.tenant(tenantName);
    resp.tables(tableLinks);
    return resp;
}
Example 2: addDataSourceInternal
import org.apache.metamodel.schema.Schema; // the import required by this method
private void addDataSourceInternal(DataContext dc, MDataSource mDataSource) throws MetaException {
    for (Schema rawSchema : dc.getSchemas()) {
        String schemaName = rawSchema.getName();
        // schema-less data sources may report a null schema name
        if (schemaName == null)
            schemaName = "__DEFAULT";
        // skip the JDBC metadata schema
        if ("information_schema".equalsIgnoreCase(schemaName))
            continue;
        LOG.debug("add schema. schemaName=" + schemaName);
        MSchema mSchema = new MSchema(schemaName, mDataSource);
        pm.makePersistent(mSchema);
        addTablesOfSchema(rawSchema, mSchema, null);
    }
}
Example 3: getSchemaMetadata
import org.apache.metamodel.schema.Schema; // the import required by this method
@Override
public SchemaMetadata getSchemaMetadata(final Schema schema) {
    if (schema == null) {
        return null;
    }
    final String schemaName = schema.getName();
    return getSchemaMetadataByName(schemaName);
}
Example 4: getTablePostFix
import org.apache.metamodel.schema.Schema; // the import required by this method
private static String getTablePostFix(final Table table) {
    final String postFix;
    final Schema schema = table.getSchema();
    if (schema == null) {
        postFix = "." + table.getName();
    } else {
        postFix = "." + schema.getName() + "." + table.getName();
    }
    return postFix;
}
Example 5: getValue
import org.apache.metamodel.schema.Schema; // the import required by this method
@Override
public String getValue() {
    final Schema schema = getSchema();
    if (schema == null) {
        return null;
    }
    return schema.getName();
}
Example 6: getSchemaMetadata
import org.apache.metamodel.schema.Schema; // the import required by this method
@Override
public SchemaMetadata getSchemaMetadata(Schema schema) {
    if (schema == null) {
        return null;
    }
    final String schemaName = schema.getName();
    return getSchemaMetadataByName(schemaName);
}
Example 7: testSerializeDeserializeDatastores
import org.apache.metamodel.schema.Schema; // the import required by this method
@Test
public void testSerializeDeserializeDatastores() {
    String csv = ConfigurationSerializer.serializeAnalyzerBeansConfigurationDataStores(analyzerBeansConfiguration);
    logger.info("Csv: " + csv);
    AnalyzerBeansConfiguration deserialized = ConfigurationSerializer.deserializeAnalyzerBeansDatastores(csv);
    for (String datastoreName : analyzerBeansConfiguration.getDatastoreCatalog().getDatastoreNames()) {
        logger.info("Datastore: " + datastoreName);
        Datastore datastore = analyzerBeansConfiguration.getDatastoreCatalog().getDatastore(datastoreName);
        Datastore deserializedDatastore = deserialized.getDatastoreCatalog().getDatastore(datastoreName);
        Assert.assertNotNull(deserializedDatastore);
        SchemaNavigator schemaNavigator = datastore.openConnection().getSchemaNavigator();
        SchemaNavigator deserializedSchemaNavigator = deserializedDatastore.openConnection().getSchemaNavigator();
        for (Schema schema : schemaNavigator.getSchemas()) {
            String schemaName = schema.getName();
            logger.info("\tSchema: " + schemaName);
            Schema deserializedSchema = deserializedSchemaNavigator.getSchemaByName(schemaName);
            Assert.assertNotNull(deserializedSchema);
            for (Table table : schema.getTables()) {
                String tableName = table.getName();
                logger.info("\t\tTable: " + tableName);
                Table deserializedTable = deserializedSchema.getTableByName(tableName);
                Assert.assertNotNull(deserializedTable);
                for (Column column : table.getColumns()) {
                    String columnName = column.getName();
                    logger.info("\t\t\tColumn: " + columnName);
                    Column deserializedColumn = deserializedTable.getColumnByName(columnName);
                    Assert.assertNotNull(deserializedColumn);
                }
            }
        }
    }
}
Example 8: getJdbcSchemaName
import org.apache.metamodel.schema.Schema; // the import required by this method
private String getJdbcSchemaName(Schema schema) {
    if (_usesCatalogsAsSchemas) {
        return null;
    } else {
        return schema.getName();
    }
}
Example 9: getCatalogName
import org.apache.metamodel.schema.Schema; // the import required by this method
private String getCatalogName(Schema schema) {
    if (_usesCatalogsAsSchemas) {
        return schema.getName();
    } else {
        return _dataContext.getCatalogName();
    }
}
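Examples 8 and 9 are two sides of the same mapping: when the _usesCatalogsAsSchemas flag is set, the MetaModel schema name is passed to JDBC as the catalog name and the JDBC schema name is left null; otherwise the schema name is used as the JDBC schema name directly.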
Example 10: rewriteFromItem
import org.apache.metamodel.schema.Schema; // the import required by this method
@Override
protected String rewriteFromItem(Query query, FromItem item) {
    String result = super.rewriteFromItem(query, item);
    Table table = item.getTable();
    if (table != null) {
        Schema schema = table.getSchema();
        if (schema != null) {
            String schemaName = schema.getName();
            if (schemaName != null) {
                // wrap the schema name in double quotes
                // (note that replaceFirst interprets schemaName as a regex)
                result = result.replaceFirst(schemaName, '\"' + schema.getName() + '\"');
            }
        }
    }
    return result;
}
Example 11: getDefaultSchemaName
import org.apache.metamodel.schema.Schema; // the import required by this method
@Override
public String getDefaultSchemaName() throws MetaModelException {
    for (DataContext dc : _delegates) {
        Schema schema = dc.getDefaultSchema();
        if (schema != null) {
            return schema.getName();
        }
    }
    return null;
}
Example 12: getDefaultSchema
import org.apache.metamodel.schema.Schema; // the import required by this method
/**
 * {@inheritDoc}
 */
@Override
public final Schema getDefaultSchema() throws MetaModelException {
    Schema result = null;
    final String defaultSchemaName = getDefaultSchemaName();
    if (defaultSchemaName != null) {
        result = getSchemaByName(defaultSchemaName);
    }
    if (result == null) {
        final List<Schema> schemas = getSchemas();
        if (schemas.size() == 1) {
            result = schemas.get(0);
        } else {
            int highestTableCount = -1;
            for (Schema schema : schemas) {
                String name = schema.getName();
                // guard against unnamed schemas before lowercasing
                if (name != null) {
                    name = name.toLowerCase();
                    final boolean isInformationSchema = name.startsWith("information") && name.endsWith("schema");
                    if (!isInformationSchema && schema.getTableCount() > highestTableCount) {
                        highestTableCount = schema.getTableCount();
                        result = schema;
                    }
                }
            }
        }
    }
    return result;
}
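Note the fallback order in this example: an explicitly configured default schema name wins; failing that, a lone schema is used as-is; and only then does the heuristic kick in, skipping "information schema"-style metadata schemas and picking the schema with the most tables.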
Example 13: runCancelJobJob
import org.apache.metamodel.schema.Schema; // the import required by this method
public static void runCancelJobJob(final DataCleanerConfiguration configuration,
        final ClusterManager clusterManager) throws Throwable {
    // build a job that concats names and inserts the concatenated names
    // into a file
    final AnalysisJobBuilder jobBuilder = new AnalysisJobBuilder(configuration);
    jobBuilder.setDatastore("orderdb");
    jobBuilder.addSourceColumns("CUSTOMERS.CUSTOMERNUMBER", "CUSTOMERS.CONTACTFIRSTNAME",
            "CUSTOMERS.CONTACTLASTNAME");

    // concatenate firstname + lastname
    final TransformerComponentBuilder<ConcatenatorTransformer> concatenator =
            jobBuilder.addTransformer(ConcatenatorTransformer.class);
    concatenator.addInputColumn(jobBuilder.getSourceColumnByName("CONTACTFIRSTNAME"));
    concatenator.addInputColumn(jobBuilder.getSourceColumnByName("CONTACTLASTNAME"));
    concatenator.setConfiguredProperty("Separator", " ");

    // insert into CSV file
    final Datastore csvDatastore = configuration.getDatastoreCatalog().getDatastore("csv");
    final Datastore dbDatastore = configuration.getDatastoreCatalog().getDatastore("orderdb");
    final DatastoreConnection csvCon = csvDatastore.openConnection();
    final DatastoreConnection dbCon = dbDatastore.openConnection();
    try {
        final Schema schema = csvCon.getDataContext().getDefaultSchema();
        final String schemaName = schema.getName();
        final String tableName = schema.getTable(0).getName();
        final AnalyzerComponentBuilder<InsertIntoTableAnalyzer> insert =
                jobBuilder.addAnalyzer(InsertIntoTableAnalyzer.class);
        insert.setConfiguredProperty("Datastore", csvDatastore);
        insert.addInputColumn(jobBuilder.getSourceColumnByName("CUSTOMERNUMBER"));
        insert.addInputColumn(concatenator.getOutputColumns().get(0));
        insert.setConfiguredProperty("Schema name", schemaName);
        insert.setConfiguredProperty("Table name", tableName);
        insert.setConfiguredProperty("Column names", new String[] { "id", "name" });
        insert.setConfiguredProperty("Buffer size", WriteBufferSizeOption.TINY);

        // build the job
        final AnalysisJob job = jobBuilder.toAnalysisJob();

        // run the job in a distributed fashion
        final DistributedAnalysisRunner runner = new DistributedAnalysisRunner(configuration, clusterManager);
        final AnalysisResultFuture resultFuture = runner.run(job);
        resultFuture.cancel();
        Assert.assertTrue(resultFuture.isCancelled());
    } finally {
        dbCon.close();
        csvCon.close();
        jobBuilder.close();
    }
}
Example 14: testUpdateCSV
import org.apache.metamodel.schema.Schema; // the import required by this method
public void testUpdateCSV() throws Exception {
    final File file = new File("target/example_updated.csv");
    FileHelper.copy(new File("src/test/resources/example_updated.csv"), file);

    final CsvDatastore datastore = new CsvDatastore("example", file.getPath(), null, ',', "UTF8");
    final UpdateableDatastoreConnection connection = datastore.openConnection();
    final DataContext dataContext = connection.getDataContext();
    final Schema schema = dataContext.getDefaultSchema();
    final Table table = schema.getTable(0);

    final UpdateTableAnalyzer updateTableAnalyzer = new UpdateTableAnalyzer();
    updateTableAnalyzer.datastore = datastore;
    updateTableAnalyzer.schemaName = schema.getName();
    updateTableAnalyzer.tableName = table.getName();
    updateTableAnalyzer.columnNames = new String[] { "name" };
    updateTableAnalyzer.conditionColumnNames = new String[] { "id" };
    updateTableAnalyzer.errorHandlingOption = ErrorHandlingOption.SAVE_TO_FILE;
    updateTableAnalyzer._componentContext = EasyMock.createMock(ComponentContext.class);

    final InputColumn<Object> inputId = new MockInputColumn<>("id", Object.class);
    final InputColumn<Object> inputNewName = new MockInputColumn<>("new_name", Object.class);
    updateTableAnalyzer.values = new InputColumn[] { inputNewName };
    updateTableAnalyzer.conditionValues = new InputColumn[] { inputId };

    updateTableAnalyzer.validate();
    updateTableAnalyzer.init();
    updateTableAnalyzer.run(new MockInputRow().put(inputId, 1).put(inputNewName, "foo"), 1);
    updateTableAnalyzer.run(new MockInputRow().put(inputId, "2").put(inputNewName, "bar"), 1);
    updateTableAnalyzer.run(new MockInputRow().put(inputId, 3).put(inputNewName, "baz"), 1);

    final WriteDataResult result = updateTableAnalyzer.getResult();
    assertEquals(0, result.getErrorRowCount());
    assertEquals(0, result.getWrittenRowCount());
    assertEquals(3, result.getUpdatesCount());

    final DataSet dataSet = dataContext.query().from(table).select("id", "name").execute();
    assertTrue(dataSet.next());
    assertEquals("Row[values=[4, hans]]", dataSet.getRow().toString());
    assertTrue(dataSet.next());
    assertEquals("Row[values=[5, manuel]]", dataSet.getRow().toString());
    assertTrue(dataSet.next());
    assertEquals("Row[values=[6, ankit]]", dataSet.getRow().toString());
    assertTrue(dataSet.next());
    assertEquals("Row[values=[1, foo]]", dataSet.getRow().toString());
    assertTrue(dataSet.next());
    assertEquals("Row[values=[2, bar]]", dataSet.getRow().toString());
    assertTrue(dataSet.next());
    assertEquals("Row[values=[3, baz]]", dataSet.getRow().toString());
    assertFalse(dataSet.next());

    connection.close();
}
Example 15: runCancelJobJob
import org.apache.metamodel.schema.Schema; // the import required by this method
public static void runCancelJobJob(AnalyzerBeansConfiguration configuration, ClusterManager clusterManager)
        throws Throwable {
    // build a job that concats names and inserts the concatenated names
    // into a file
    final AnalysisJobBuilder jobBuilder = new AnalysisJobBuilder(configuration);
    jobBuilder.setDatastore("orderdb");
    jobBuilder.addSourceColumns("CUSTOMERS.CUSTOMERNUMBER", "CUSTOMERS.CONTACTFIRSTNAME",
            "CUSTOMERS.CONTACTLASTNAME");

    // concatenate firstname + lastname
    final TransformerJobBuilder<ConcatenatorTransformer> concatenator = jobBuilder
            .addTransformer(ConcatenatorTransformer.class);
    concatenator.addInputColumn(jobBuilder.getSourceColumnByName("CONTACTFIRSTNAME"));
    concatenator.addInputColumn(jobBuilder.getSourceColumnByName("CONTACTLASTNAME"));
    concatenator.setConfiguredProperty("Separator", " ");

    // insert into CSV file
    final Datastore csvDatastore = configuration.getDatastoreCatalog().getDatastore("csv");
    final Datastore dbDatastore = configuration.getDatastoreCatalog().getDatastore("orderdb");
    final DatastoreConnection csvCon = csvDatastore.openConnection();
    final DatastoreConnection dbCon = dbDatastore.openConnection();
    try {
        final Schema schema = csvCon.getDataContext().getDefaultSchema();
        final String schemaName = schema.getName();
        final String tableName = schema.getTable(0).getName();
        final AnalyzerJobBuilder<InsertIntoTableAnalyzer> insert = jobBuilder
                .addAnalyzer(InsertIntoTableAnalyzer.class);
        insert.setConfiguredProperty("Datastore", csvDatastore);
        insert.addInputColumn(jobBuilder.getSourceColumnByName("CUSTOMERNUMBER"));
        insert.addInputColumn(concatenator.getOutputColumns().get(0));
        insert.setConfiguredProperty("Schema name", schemaName);
        insert.setConfiguredProperty("Table name", tableName);
        insert.setConfiguredProperty("Column names", new String[] { "id", "name" });
        insert.setConfiguredProperty("Buffer size", WriteBufferSizeOption.TINY);

        // build the job
        final AnalysisJob job = jobBuilder.toAnalysisJob();

        // run the job in a distributed fashion
        final DistributedAnalysisRunner runner = new DistributedAnalysisRunner(configuration, clusterManager);
        final AnalysisResultFuture resultFuture = runner.run(job);
        resultFuture.cancel();
        Assert.assertTrue(resultFuture.isCancelled());
    } finally {
        dbCon.close();
        csvCon.close();
        jobBuilder.close();
    }
}