本文整理匯總了Java中org.pentaho.di.core.Const.toInt方法的典型用法代碼示例。如果您正苦於以下問題:Java Const.toInt方法的具體用法?Java Const.toInt怎麽用?Java Const.toInt使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.pentaho.di.core.Const
的用法示例。
在下文中一共展示了Const.toInt方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: getInfo
import org.pentaho.di.core.Const; //導入方法依賴的package包/類
/**
 * Copy the values currently entered in the dialog widgets into the given data set.
 *
 * @param set The data set to load the dialog information into
 */
public void getInfo( DataSet set ) {
  set.setName( wName.getText() );
  set.setDescription( wDescription.getText() );
  set.setGroup( DataSetConst.findDataSetGroup( groups, wDataSetGroup.getText() ) );
  set.setTableName( wTableName.getText() );

  // Rebuild the field list from the non-empty rows of the mapping table widget.
  set.getFields().clear();
  int rowCount = wFieldMapping.nrNonEmpty();
  for ( int row = 0; row < rowCount; row++ ) {
    TableItem tableItem = wFieldMapping.getNonEmpty( row );
    // Columns are read left-to-right starting at index 1.
    String fieldName = tableItem.getText( 1 );
    String columnName = tableItem.getText( 2 );
    int type = ValueMetaFactory.getIdForValueMeta( tableItem.getText( 3 ) );
    int length = Const.toInt( tableItem.getText( 4 ), -1 );     // -1 means "not specified"
    int precision = Const.toInt( tableItem.getText( 5 ), -1 );  // -1 means "not specified"
    String comment = tableItem.getText( 6 );
    set.getFields().add( new DataSetField( fieldName, columnName, type, length, precision, comment ) );
  }
}
示例2: readRep
import org.pentaho.di.core.Const; //導入方法依賴的package包/類
/**
 * Read this step's configuration from the repository.
 *
 * @param rep       repository to read from
 * @param metastore metastore (unused here, required by the interface)
 * @param id_step   id of the step whose attributes are loaded
 * @param databases available database connections (unused here)
 * @throws KettleException if a repository attribute cannot be read
 */
@Override
public void readRep(Repository rep, IMetaStore metastore, ObjectId id_step, List<DatabaseMeta> databases)
    throws KettleException {
  // Connection settings.
  this.cassandraNodes = rep.getStepAttributeString(id_step, 0, CASSANDRA_NODES);
  this.cassandraPort = rep.getStepAttributeString(id_step, 0, CASSANDRA_PORT);
  this.username = rep.getStepAttributeString(id_step, 0, USERNAME);
  this.password = rep.getStepAttributeString(id_step, 0, PASSWORD);
  if (!Const.isEmpty(this.password)) {
    // Stored passwords may be encrypted; decrypt only when a value is present.
    this.password = Encr.decryptPasswordOptionallyEncrypted(this.password);
  }
  this.keyspace = rep.getStepAttributeString(id_step, 0, CASSANDRA_KEYSPACE);

  // SSL / truststore settings.
  this.SslEnabled = rep.getStepAttributeBoolean(id_step, 0, CASSANDRA_WITH_SSL);
  this.trustStoreFilePath = rep.getStepAttributeString(id_step, 0, CASSANDRA_TRUSTSTORE_FILE_PATH);
  this.trustStorePass = rep.getStepAttributeString(id_step, 0, CASSANDRA_TRUSTSTORE_PASS);
  if (!Const.isEmpty(this.trustStorePass)) {
    this.trustStorePass = Encr.decryptPasswordOptionallyEncrypted(this.trustStorePass);
  }

  // Write behavior settings.
  this.columnfamily = rep.getStepAttributeString(id_step, 0, CASSANDRA_COLUMN_FAMILY);
  this.syncMode = rep.getStepAttributeBoolean(id_step, 0, SYNC_ENABLED);
  String rawBatchSize = rep.getStepAttributeString(id_step, 0, BATCH_SIZE);
  this.batchSize = (Const.isEmpty(rawBatchSize) ? 1000 : Integer.valueOf(rawBatchSize));  // default 1000 rows per batch
  String rawCompression = rep.getStepAttributeString(id_step, 0, QUERY_COMPRESSION);
  this.compression = (Const.isEmpty(rawCompression) ? ConnectionCompression.SNAPPY : ConnectionCompression.fromString(rawCompression));
  this.specifyFields = rep.getStepAttributeBoolean(id_step, 0, SPECIFY_FIELDS);

  // Field mapping: sized by whichever of the two attribute lists is longer.
  int columnCount = rep.countNrStepAttributes(id_step, COLUMN_NAME);
  int streamCount = rep.countNrStepAttributes(id_step, STREAM_NAME);
  int rowCount = Math.max(columnCount, streamCount);
  allocate(rowCount);
  for (int i = 0; i < rowCount; i++) {
    this.cassandraFields[i] = Const.NVL(rep.getStepAttributeString(id_step, i, COLUMN_NAME), "");
    this.streamFields[i] = Const.NVL(rep.getStepAttributeString(id_step, i, STREAM_NAME), "");
  }
  this.ttl = Const.toInt(rep.getStepAttributeString(id_step, 0, TTL), 0);  // 0 = no TTL
}
示例3: validate
import org.pentaho.di.core.Const; //導入方法依賴的package包/類
/**
 * Validate the configured warehouse-management settings, logging a localized
 * error message for the first problem found.
 *
 * @return true when the configuration is valid, false otherwise
 * @throws KettleException declared for interface compatibility
 */
public boolean validate() throws KettleException {
  // A missing database connection is always fatal.
  if ( databaseMeta == null || Const.isEmpty( databaseMeta.getName() ) ) {
    logError( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Validate.DatabaseIsEmpty" ) );
    return false;
  }

  if ( Const.isEmpty( managementAction ) ) {
    logError( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Validate.ManagementAction" ) );
    return false;
  }

  // Numeric bounds only apply to the "create warehouse" action.
  if ( managementAction.equals( MANAGEMENT_ACTIONS[MANAGEMENT_ACTION_CREATE] ) ) {
    // Values are variable-substituted first; empty values are allowed (defaults apply).
    String maxClusters = environmentSubstitute( maxClusterCount );
    if ( !Const.isEmpty( maxClusters ) && Const.toInt( maxClusters, -1 ) <= 0 ) {
      logError( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Validate.MaxClusterCount", maxClusters ) );
      return false;
    }

    String minClusters = environmentSubstitute( minClusterCount );
    if ( !Const.isEmpty( minClusters ) && Const.toInt( minClusters, -1 ) < 0 ) {
      logError( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Validate.MinClusterCount", minClusters ) );
      return false;
    }

    String suspend = environmentSubstitute( autoSuspend );
    if ( !Const.isEmpty( suspend ) && Const.toInt( suspend, -1 ) < 0 ) {
      logError( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Validate.AutoSuspend", suspend ) );
      return false;
    }
  }

  return true;
}
示例4: loadXML
import org.pentaho.di.core.Const; //導入方法依賴的package包/類
/**
 * Load this step's configuration from its XML node.
 *
 * @param stepnode  the step's XML node
 * @param databases available database connections (unused here)
 * @param metaStore metastore (unused here, required by the interface)
 * @throws KettleXMLException if the XML cannot be parsed
 */
@Override
public void loadXML(Node stepnode, List<DatabaseMeta> databases,
    IMetaStore metaStore) throws KettleXMLException {
  Node fieldsNode = XMLHandler.getSubNode(stepnode, "fields");
  int fieldCount = XMLHandler.countNodes(fieldsNode, "field");
  allocate(fieldCount);

  for (int idx = 0; idx < fieldCount; idx++) {
    Node fieldNode = XMLHandler.getSubNodeByNr(fieldsNode, "field", idx);
    // Build the field locally, then store it in the allocated slot.
    TextFileField field = new TextFileField();
    field.setName(XMLHandler.getTagValue(fieldNode, "name"));
    field.setType(XMLHandler.getTagValue(fieldNode, "type"));
    field.setFormat(XMLHandler.getTagValue(fieldNode, "format"));
    field.setCurrencySymbol(XMLHandler.getTagValue(fieldNode, "currency"));
    field.setDecimalSymbol(XMLHandler.getTagValue(fieldNode, "decimal"));
    field.setGroupingSymbol(XMLHandler.getTagValue(fieldNode, "group"));
    field.setTrimType(ValueMeta.getTrimTypeByCode(XMLHandler.getTagValue(fieldNode, "trim_type")));
    field.setNullString(XMLHandler.getTagValue(fieldNode, "nullif"));
    field.setLength(Const.toInt(XMLHandler.getTagValue(fieldNode, "length"), -1));       // -1 = unspecified
    field.setPrecision(Const.toInt(XMLHandler.getTagValue(fieldNode, "precision"), -1)); // -1 = unspecified
    outputFields[idx] = field;
  }

  // Target-field settings live under their own namespace node.
  targetFieldName = XMLHandler.getTagValue(stepnode, ConcatFieldsNodeNameSpace, "targetFieldName");
  targetFieldLength = Const.toInt(
      XMLHandler.getTagValue(stepnode, ConcatFieldsNodeNameSpace, "targetFieldLength"), 0);
}
示例5: loadXML
import org.pentaho.di.core.Const; //導入方法依賴的package包/類
/**
 * Load this step's configuration from its XML node.
 *
 * @param stepnode  the step's XML node
 * @param databases available database connections (unused here)
 * @param metastore metastore (unused here, required by the interface)
 * @throws KettleXMLException if the XML cannot be parsed
 */
@Override
public void loadXML(Node stepnode, List<DatabaseMeta> databases, IMetaStore metastore) throws KettleXMLException {
  // Connection settings.
  this.cassandraNodes = XMLHandler.getTagValue(stepnode, CASSANDRA_NODES);
  this.cassandraPort = XMLHandler.getTagValue(stepnode, CASSANDRA_PORT);
  this.username = XMLHandler.getTagValue(stepnode, USERNAME);
  this.password = XMLHandler.getTagValue(stepnode, PASSWORD);
  if (!Const.isEmpty(this.password)) {
    // Stored passwords may be encrypted; decrypt only when a value is present.
    this.password = Encr.decryptPasswordOptionallyEncrypted(this.password);
  }
  this.keyspace = XMLHandler.getTagValue(stepnode, CASSANDRA_KEYSPACE);

  // SSL / truststore settings ("Y" means enabled).
  this.SslEnabled = "Y".equalsIgnoreCase(XMLHandler.getTagValue(stepnode, CASSANDRA_WITH_SSL));
  this.trustStoreFilePath = XMLHandler.getTagValue(stepnode, CASSANDRA_TRUSTSTORE_FILE_PATH);
  this.trustStorePass = XMLHandler.getTagValue(stepnode, CASSANDRA_TRUSTSTORE_PASS);
  if (!Const.isEmpty(this.trustStorePass)) {
    this.trustStorePass = Encr.decryptPasswordOptionallyEncrypted(this.trustStorePass);
  }

  // Write behavior settings.
  this.columnfamily = XMLHandler.getTagValue(stepnode, CASSANDRA_COLUMN_FAMILY);
  this.syncMode = "Y".equalsIgnoreCase(XMLHandler.getTagValue(stepnode, SYNC_ENABLED));
  String rawBatchSize = XMLHandler.getTagValue(stepnode, BATCH_SIZE);
  this.batchSize = (Const.isEmpty(rawBatchSize) ? 1000 : Integer.valueOf(rawBatchSize));  // default 1000 rows per batch
  String rawCompression = XMLHandler.getTagValue(stepnode, QUERY_COMPRESSION);
  this.compression = (Const.isEmpty(rawCompression) ? ConnectionCompression.SNAPPY : ConnectionCompression.fromString(rawCompression));
  this.specifyFields = "Y".equalsIgnoreCase(XMLHandler.getTagValue(stepnode, SPECIFY_FIELDS));

  // Field mapping rows.
  Node mappingNode = XMLHandler.getSubNode(stepnode, FIELD_MAPPING);
  int rowCount = XMLHandler.countNodes(mappingNode, FIELD);
  allocate(rowCount);
  for (int idx = 0; idx < rowCount; idx++) {
    Node rowNode = XMLHandler.getSubNodeByNr(mappingNode, FIELD, idx);
    this.cassandraFields[idx] = XMLHandler.getTagValue(rowNode, COLUMN_NAME);
    this.streamFields[idx] = XMLHandler.getTagValue(rowNode, STREAM_NAME);
  }
  this.ttl = Const.toInt(XMLHandler.getTagValue(stepnode, TTL), 0);  // 0 = no TTL
}
示例6: doGet
import org.pentaho.di.core.Const; //導入方法依賴的package包/類
/**
 * Serve rows for a streaming data service from the streaming cache, either as
 * binary data or as JSON depending on the "binary" request parameter.
 *
 * Query parameters: service (required), last, lastPeriod, fromId, toId, new,
 * maxWait (windowing options, -1 = unspecified), binary (y/true for binary output).
 *
 * BUG FIX: the original code raised the "service doesn't exist" error only when
 * the service name was EMPTY, while an unknown-but-non-empty service name fell
 * through the null cache-entry check and silently returned an empty 200 OK.
 * Now a missing parameter and an unknown service each produce an explicit error.
 *
 * @param request  the HTTP request
 * @param response the HTTP response to write data or an error to
 */
public void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException, IOException {
  if ( isJettyMode() && !request.getContextPath().startsWith( CONTEXT_PATH ) ) {
    return;
  }

  String serviceName = request.getParameter( "service" );

  // Windowing options; -1 / -1L mean "not specified".
  int lastSize = Const.toInt( request.getParameter( "last" ), -1 );
  int lastPeriod = Const.toInt( request.getParameter( "lastPeriod" ), -1 );
  long fromId = Const.toLong( request.getParameter( "fromId" ), -1L );
  long toId = Const.toLong( request.getParameter( "toId" ), -1L );
  int newSize = Const.toInt( request.getParameter( "new" ), -1 );
  int maxWait = Const.toInt( request.getParameter( "maxWait" ), -1 );
  String binaryOption = request.getParameter( "binary" );
  boolean binary = "y".equalsIgnoreCase( binaryOption ) || "true".equalsIgnoreCase( binaryOption );
  long now = System.currentTimeMillis();

  response.setStatus( HttpServletResponse.SC_OK );
  if ( binary ) {
    LogChannel.GENERAL.logBasic( "Binary data asked for service '"+serviceName+"'");
    response.setContentType( "application/octet-stream" );
  } else {
    LogChannel.GENERAL.logBasic( "JSON data asked for service '"+serviceName+"'");
    response.setContentType( "application/json" );
    response.setCharacterEncoding( Const.XML_ENCODING );
  }

  try {
    if ( Const.isEmpty( serviceName ) ) {
      throw new KettleException( "Missing 'service' request parameter" );
    }

    StreamingCache cache = StreamingCache.getInstance();
    StreamingCacheEntry streamingCacheEntry = cache.get( serviceName );
    if ( streamingCacheEntry == null ) {
      String comment = "Streaming cache service '" + serviceName + "' doesn't exist";
      LogChannel.GENERAL.logError( comment );
      throw new KettleException( comment );
    }

    log.logBasic( "Cache entry of '"+serviceName+"' found");
    // Now we have a cache entry for the service.
    // Let's get the rows from the cache with the given options...
    //
    List<StreamingTimedNumberedRow> rows = streamingCacheEntry.findRows( log, lastSize, lastPeriod, fromId, toId, newSize, maxWait, now );
    while ( rows == null ) {
      // Keep retrying with a one second delay until the service transformation has some rows.
      // NOTE(review): this can loop indefinitely if the service never produces rows — consider a timeout.
      Thread.sleep( 1000 );
      rows = streamingCacheEntry.findRows( log, lastSize, lastPeriod, fromId, toId, newSize, maxWait, now );
    }
    LogChannel.GENERAL.logBasic( "Data export for '"+serviceName+"' found, "+rows.size()+" rows found");

    if ( binary ) {
      writeBinaryData( serviceName, response, streamingCacheEntry, rows );
    } else {
      writeJsonData( serviceName, response, streamingCacheEntry, rows );
    }
  } catch ( Exception e ) {
    LogChannel.GENERAL.logError( "Error get streaming data for service '" + serviceName + "'", e);
    try {
      response.sendError( 500, e.getMessage() + " - " + Const.getStackTracker( e ) );
    } catch ( IOException ioe ) {
      LogChannel.GENERAL.logError( "Error writing error response for service '" + serviceName + "'", ioe );
    }
  }
}