本文整理匯總了Java中org.pentaho.di.core.row.RowMetaInterface.getInteger方法的典型用法代碼示例。如果您正苦於以下問題:Java RowMetaInterface.getInteger方法的具體用法?Java RowMetaInterface.getInteger怎麽用?Java RowMetaInterface.getInteger使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.pentaho.di.core.row.RowMetaInterface
的用法示例。
在下文中一共展示了RowMetaInterface.getInteger方法的13個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: getClustersUsingSlave
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Returns the names of all cluster schemas that reference the given slave server.
 * Cluster IDs that cannot be resolved to a schema are silently skipped.
 */
public synchronized String[] getClustersUsingSlave(long id_slave) throws KettleException
{
    // Find the distinct cluster IDs pointing at this slave.
    String sql = "SELECT DISTINCT " + quote(FIELD_CLUSTER_SLAVE_ID_CLUSTER)
        + " FROM " + databaseMeta.getQuotedSchemaTableCombination(null, TABLE_R_CLUSTER_SLAVE)
        + " WHERE " + quote(FIELD_CLUSTER_SLAVE_ID_SLAVE) + " = " + id_slave;

    List<Object[]> rows = database.getRows(sql, 100);
    RowMetaInterface rowMeta = database.getReturnRowMeta();

    List<String> names = new ArrayList<String>();
    for (Object[] row : rows)
    {
        long id_cluster_schema = rowMeta.getInteger(row, quote(FIELD_CLUSTER_SLAVE_ID_CLUSTER), -1L);
        if (id_cluster_schema <= 0)
        {
            continue; // no valid cluster ID in this row
        }
        RowMetaAndData clusterRow = getClusterSchema(id_cluster_schema);
        if (clusterRow == null)
        {
            continue; // cluster schema no longer exists
        }
        String clusterName = clusterRow.getString(quote(FIELD_CLUSTER_NAME), "<name not found>");
        if (clusterName != null)
        {
            names.add(clusterName);
        }
    }
    return names.toArray(new String[names.size()]);
}
示例2: getTransformationsWithIDList
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Resolves each transformation ID found in the given result rows to its full
 * "directory path + name" string.
 *
 * @param list    query result rows; the transformation ID is read from the
 *                quoted FIELD_TRANSFORMATION_ID_TRANSFORMATION column
 * @param rowMeta metadata describing the rows in {@code list}
 * @return an array the same size as {@code list}; entries that cannot be
 *         resolved (missing transformation or directory) are left {@code null}
 * @throws KettleException if a repository lookup fails
 */
private String[] getTransformationsWithIDList(List<Object[]> list, RowMetaInterface rowMeta) throws KettleException
{
    String[] transList = new String[list.size()];
    for (int i = 0; i < list.size(); i++)
    {
        long id_transformation = rowMeta.getInteger(list.get(i), quote(FIELD_TRANSFORMATION_ID_TRANSFORMATION), -1L);
        if (id_transformation > 0)
        {
            RowMetaAndData transRow = getTransformation(id_transformation);
            if (transRow != null)
            {
                String transName = transRow.getString(quote(FIELD_TRANSFORMATION_NAME), "<name not found>");
                long id_directory = transRow.getInteger(quote(FIELD_TRANSFORMATION_ID_DIRECTORY), -1L);
                RepositoryDirectory dir = directoryTree.findDirectory(id_directory);
                // BUG FIX: findDirectory() can return null for a stale or
                // unknown directory ID, which previously caused a
                // NullPointerException here.
                if (dir != null)
                {
                    transList[i] = dir.getPathObjectCombination(transName);
                }
            }
        }
    }
    return transList;
}
示例3: getJobsWithIDList
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Resolves each job ID found in the given result rows to its full
 * "directory path + name" string.
 *
 * @param list    query result rows; the job ID is read from the quoted
 *                FIELD_JOB_ID_JOB column
 * @param rowMeta metadata describing the rows in {@code list}
 * @return an array the same size as {@code list}; entries that cannot be
 *         resolved (missing job or directory) are left {@code null}
 * @throws KettleException if a repository lookup fails
 */
public String[] getJobsWithIDList(List<Object[]> list, RowMetaInterface rowMeta) throws KettleException
{
    String[] jobList = new String[list.size()];
    for (int i = 0; i < list.size(); i++)
    {
        long id_job = rowMeta.getInteger(list.get(i), quote(KettleDatabaseRepository.FIELD_JOB_ID_JOB), -1L);
        if (id_job > 0)
        {
            RowMetaAndData jobRow = getJob(new LongObjectId(id_job));
            if (jobRow != null)
            {
                String jobName = jobRow.getString(KettleDatabaseRepository.FIELD_JOB_NAME, "<name not found>");
                long id_directory = jobRow.getInteger(KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY, -1L);
                // Always reload the directory tree so recent renames/moves are reflected.
                RepositoryDirectoryInterface dir = repository.loadRepositoryDirectoryTree().findDirectory(new LongObjectId(id_directory));
                // BUG FIX: findDirectory() can return null for a stale
                // directory ID, which previously caused a NullPointerException.
                if (dir != null)
                {
                    jobList[i] = dir.getPathObjectCombination(jobName);
                }
            }
        }
    }
    return jobList;
}
示例4: getTransformationsWithIDList
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Resolves each transformation ID found in the given result rows to its full
 * "directory path + name" string.
 *
 * @param list    query result rows; the transformation ID is read from the
 *                quoted FIELD_TRANSFORMATION_ID_TRANSFORMATION column
 * @param rowMeta metadata describing the rows in {@code list}
 * @return an array the same size as {@code list}; entries that cannot be
 *         resolved (missing transformation or directory) are left {@code null}
 * @throws KettleException if a repository lookup fails
 */
public String[] getTransformationsWithIDList(List<Object[]> list, RowMetaInterface rowMeta) throws KettleException
{
    String[] transList = new String[list.size()];
    for (int i = 0; i < list.size(); i++)
    {
        long id_transformation = rowMeta.getInteger(list.get(i), quote(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_TRANSFORMATION), -1L);
        if (id_transformation > 0)
        {
            RowMetaAndData transRow = getTransformation(new LongObjectId(id_transformation));
            if (transRow != null)
            {
                String transName = transRow.getString(KettleDatabaseRepository.FIELD_TRANSFORMATION_NAME, "<name not found>");
                long id_directory = transRow.getInteger(KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DIRECTORY, -1L);
                RepositoryDirectoryInterface dir = repository.loadRepositoryDirectoryTree().findDirectory(new LongObjectId(id_directory));
                // BUG FIX: findDirectory() can return null for a stale
                // directory ID, which previously caused a NullPointerException.
                if (dir != null)
                {
                    transList[i] = dir.getPathObjectCombination(transName);
                }
            }
        }
    }
    return transList;
}
示例5: getIDs
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Runs the given query and collects the first (integer) column of every row.
 * Rows whose ID is NULL are recorded as 0.
 *
 * @param sql the query to execute; the first column must be an integer ID
 * @return the collected IDs as a primitive long array
 * @throws KettleException if the query or a row fetch fails
 */
private long[] getIDs(String sql) throws KettleException
{
    List<Long> ids = new ArrayList<Long>();
    ResultSet rs = database.openQuery(sql);
    try
    {
        Object[] r = database.getRow(rs);
        while (r != null)
        {
            // NOTE(review): return-row metadata is fetched inside the loop —
            // presumably only valid after the first getRow(); confirm before hoisting.
            RowMetaInterface rowMeta = database.getReturnRowMeta();
            Long id = rowMeta.getInteger(r, 0);
            // Was "new Long(0)": the boxing constructor is deprecated; valueOf
            // uses the cached instance.
            if (id == null) id = Long.valueOf(0L);
            ids.add(id);
            r = database.getRow(rs);
        }
    }
    finally
    {
        // Always release the result set, even if row conversion fails.
        if ( rs != null )
        {
            database.closeQuery(rs);
        }
    }
    return convertLongList(ids);
}
示例6: getPartition
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Computes the target partition for a row based on the configured
 * partitioning field: integer values partition by their value, all other
 * types by the value's hash code, modulo the number of partitions.
 *
 * @param rowMeta metadata of the row
 * @param row     the row data
 * @return the zero-based target partition index
 * @throws KettleException if the partitioning field is not found in the row
 */
public int getPartition(RowMetaInterface rowMeta, Object[] row ) throws KettleException
{
    init(rowMeta);
    // Lazily locate (and cache) the partitioning column index.
    if (partitionColumnIndex < 0)
    {
        partitionColumnIndex = rowMeta.indexOfValue(fieldName);
        if (partitionColumnIndex < 0) {
            throw new KettleStepException("Unable to find partitioning field name [" + fieldName + "] in the output row..." + rowMeta);
        }
    }
    long value;
    ValueMetaInterface valueMeta = rowMeta.getValueMeta(partitionColumnIndex);
    Object valueData = row[partitionColumnIndex];
    switch(valueMeta.getType()) {
    case ValueMetaInterface.TYPE_INTEGER:
        Long longValue = rowMeta.getInteger(row, partitionColumnIndex);
        if (longValue==null) {
            value = valueMeta.hashCode(valueData); // NULL integer: fall back to the hash
        }
        else {
            value = longValue.longValue();
        }
        // BUG FIX: the missing break made execution fall through to the
        // default case, discarding the integer value just computed.
        break;
    default:
        value = valueMeta.hashCode(valueData);
    }
    // BUG FIX: take the absolute value — hash codes and integer data can be
    // negative, which would yield a negative (invalid) partition index.
    // This matches the corrected variant of this partitioner elsewhere.
    int targetLocation = (int)(Math.abs(value) % nrPartitions);
    return targetLocation;
}
示例7: getIDs
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Executes the given parameterized query (caching its PreparedStatement) and
 * returns the first column of every result row as an ObjectId array.
 * Each supplied object ID becomes one INTEGER parameter, in order.
 */
public ObjectId[] getIDs(String sql, ObjectId...objectId) throws KettleException
{
    // Reuse a previously prepared statement for this SQL when available.
    PreparedStatement ps = sqlMap.get(sql);
    if (ps == null) {
        ps = database.prepareSQL(sql);
        sqlMap.put(sql, ps);
    }

    // Build the parameter row: one INTEGER value per supplied object ID.
    RowMetaInterface parameterMeta = new RowMeta();
    Object[] parameterData = new Object[objectId.length];
    int index = 0;
    for (ObjectId id : objectId) {
        parameterMeta.addValueMeta(new ValueMeta("id" + (index + 1), ValueMetaInterface.TYPE_INTEGER));
        parameterData[index] = ((LongObjectId) id).longValue();
        index++;
    }

    ResultSet resultSet = database.openQuery(ps, parameterMeta, parameterData);
    List<Object[]> rows = database.getRows(resultSet, 0, null);
    if (Const.isEmpty(rows)) {
        return new ObjectId[0];
    }

    // Convert the first column of every returned row into a LongObjectId.
    RowMetaInterface rowMeta = database.getReturnRowMeta();
    ObjectId[] ids = new ObjectId[rows.size()];
    for (int i = 0; i < ids.length; i++) {
        ids[i] = new LongObjectId(rowMeta.getInteger(rows.get(i), 0));
    }
    return ids;
}
開發者ID:yintaoxue,項目名稱:read-open-source-code,代碼行數:35,代碼來源:KettleDatabaseRepositoryConnectionDelegate.java
示例8: getPartition
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Computes the target partition for a row based on the configured
 * partitioning field: integer values partition by their value, all other
 * types by the value's hash code, modulo the number of partitions.
 *
 * @param rowMeta metadata of the row
 * @param row     the row data
 * @return the zero-based target partition index
 * @throws KettleException if the partitioning field is not found in the row
 */
public int getPartition(RowMetaInterface rowMeta, Object[] row ) throws KettleException
{
    init(rowMeta);
    // Lazily locate (and cache) the partitioning column index.
    if (partitionColumnIndex < 0)
    {
        partitionColumnIndex = rowMeta.indexOfValue(fieldName);
        if (partitionColumnIndex < 0) {
            throw new KettleStepException("Unable to find partitioning field name [" + fieldName + "] in the output row..." + rowMeta);
        }
    }
    long value;
    ValueMetaInterface valueMeta = rowMeta.getValueMeta(partitionColumnIndex);
    Object valueData = row[partitionColumnIndex];
    switch(valueMeta.getType()) {
    case ValueMetaInterface.TYPE_INTEGER:
        Long longValue = rowMeta.getInteger(row, partitionColumnIndex);
        if (longValue==null) {
            value = valueMeta.hashCode(valueData); // NULL integer: fall back to the hash
        }
        else {
            value = longValue.longValue();
        }
        // BUG FIX: the missing break made execution fall through to the
        // default case, discarding the integer value just computed.
        break;
    default:
        value = valueMeta.hashCode(valueData);
    }
    // Absolute value guards against negative hash codes / integer data
    // producing a negative partition index.
    int targetLocation = (int)(Math.abs(value) % nrPartitions);
    return targetLocation;
}
示例9: loadDatabase
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Bulk-loads staged data into Snowflake in three steps:
 * PUT the local work files to the configured Snowflake stage, run the COPY
 * statement to load the target table, then COMMIT the transaction.
 * Totals from the COPY result rows are published as the step's
 * lines-output / lines-rejected statistics.
 *
 * @throws KettleDatabaseException if a PUT reports an ERROR status or a database call fails
 * @throws KettleFileException if file access fails
 * @throws KettleValueException if a result value cannot be converted
 */
private void loadDatabase() throws KettleDatabaseException, KettleFileException, KettleValueException {
boolean filesUploaded = false; // NOTE(review): never read in this method — candidate for removal
boolean endsWithSlash = environmentSubstitute( meta.getWorkDirectory() ).endsWith( "\\" )
|| environmentSubstitute( meta.getWorkDirectory() ).endsWith( "/" );
// Build the PUT command; backslashes are normalized to forward slashes for the file:// URL,
// and a trailing slash is appended to the work directory only if it is missing.
String SQL = "PUT 'file://" + environmentSubstitute( meta.getWorkDirectory() ).replaceAll( "\\\\", "/" )
+ ( endsWithSlash ? "" : "/" ) + environmentSubstitute( meta.getTargetTable() ) + "_"
+ meta.getFileDate() + "_*' " + meta.getStage( this ) + ";";
logDebug( "Executing SQL " + SQL );
ResultSet putResultSet = data.db.openQuery( SQL, null, null, ResultSet.FETCH_FORWARD, false );
RowMetaInterface putRowMeta = data.db.getReturnRowMeta();
Object[] putRow = data.db.getRow( putResultSet );
logDebug( "=========================Put File Results======================" );
int fileNum = 0;
// One result row per uploaded file; abort the load if any file failed.
while ( putRow != null ) {
logDebug( "------------------------ File " + fileNum +"--------------------------" );
for ( int i = 0; i < putRowMeta.getFieldNames().length; i++ ) {
logDebug( putRowMeta.getFieldNames()[i] + " = " + putRowMeta.getString( putRow, i ) );
if( putRowMeta.getFieldNames()[i].equalsIgnoreCase( "status" ) ) {
if( putRowMeta.getString( putRow, i ).equalsIgnoreCase( "ERROR" ) ) {
throw new KettleDatabaseException( "Error putting file to Snowflake stage \n" + putRowMeta.getString( putRow, "message", "" ) );
}
}
}
fileNum++;
putRow = data.db.getRow( putResultSet );
}
data.db.closeQuery( putResultSet );
// Load the staged files into the target table.
String copySQL = meta.getCopyStatement( this, data.getPreviouslyOpenedFiles() );
logDebug( "Executing SQL " + copySQL );
ResultSet resultSet = data.db.openQuery( copySQL, null, null, ResultSet.FETCH_FORWARD, false );
RowMetaInterface rowMeta = data.db.getReturnRowMeta();
Object[] row = data.db.getRow( resultSet );
int rowsLoaded = 0;
int rowsLoadedField = rowMeta.indexOfValue( "rows_loaded" );
int rowsError = 0;
int errorField = rowMeta.indexOfValue( "errors_seen" );
logBasic( "====================== Bulk Load Results======================" );
int rowNum = 1;
// Accumulate loaded/error row totals across all COPY result rows.
while ( row != null ) {
logBasic( "---------------------- Row " + rowNum + " ----------------------" );
for ( int i = 0; i < rowMeta.getFieldNames().length; i++ ) {
logBasic( rowMeta.getFieldNames()[i] + " = " + rowMeta.getString( row, i ) );
}
if ( rowsLoadedField >= 0 ) {
rowsLoaded += rowMeta.getInteger( row, rowsLoadedField );
}
if ( errorField >= 0 ) {
rowsError += rowMeta.getInteger( row, errorField );
}
rowNum++;
row = data.db.getRow( resultSet );
}
data.db.closeQuery( resultSet );
// Publish step statistics, then make the load permanent.
setLinesOutput( rowsLoaded );
setLinesRejected( rowsError );
data.db.execStatement( "commit" );
}
示例10: searchNrStepAttributes
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Counts the step attributes with the given code for the given step by
 * scanning the step-attributes buffer from the first matching entry and
 * reading the attribute number column (index 2) of each consecutive match.
 *
 * NOTE(review): assumes stepAttributesBuffer rows are ordered by
 * (step id, code, nr) — TODO confirm against the code that fills the buffer.
 */
private synchronized int searchNrStepAttributes(long id_step, String code) throws KettleValueException
{
// Search the index of the first step attribute with the specified code...
//
int idx = searchStepAttributeIndexInBuffer(id_step, code, 0L);
if (idx<0) return 0; // no attribute with this code for this step
int nr = 1;
int offset = 1;
if (idx+offset>=stepAttributesBuffer.size())
{
// Only 1, the last of the attributes buffer.
//
return 1;
}
// Walk forward while rows still belong to the same (step, code) pair:
// column 0 = step id, column 1 = attribute code, column 2 = attribute nr.
Object[] look = (Object[])stepAttributesBuffer.get(idx+offset);
RowMetaInterface rowMeta = stepAttributesRowMeta;
long lookID = rowMeta.getInteger(look, 0);
String lookCode = rowMeta.getString(look, 1);
while (lookID==id_step && code.equalsIgnoreCase( lookCode ) )
{
// Find the maximum
//
nr = rowMeta.getInteger(look, 2).intValue() + 1;
offset++;
if (idx+offset<stepAttributesBuffer.size())
{
look = (Object[])stepAttributesBuffer.get(idx+offset);
lookID = rowMeta.getInteger(look, 0);
lookCode = rowMeta.getString(look, 1);
}
else
{
// Ran off the end of the buffer; nr already holds the count.
return nr;
}
}
return nr;
}
示例11: addToCache
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Stores a key/value row pair in whichever lookup structure the step is
 * configured to use: a plain map, a sorted list, a Long/Long index, or a
 * byte-array hash index.
 */
private void addToCache(RowMetaInterface keyMeta, Object[] keyData, RowMetaInterface valueMeta, Object[] valueData) throws KettleValueException
{
    // Plain mode: wrap key+meta in RowMetaAndData so equals()/hashCode()
    // compare row contents (a bare Object[] would use identity).
    if (!meta.isMemoryPreservationActive())
    {
        data.look.put(new RowMetaAndData(keyMeta, keyData), valueData);
        return;
    }

    if (meta.isUsingSortedList())
    {
        // Keep data.list sorted; an exact key match is overwritten in place.
        KeyValue keyValue = new KeyValue(keyData, valueData);
        int idx = Collections.binarySearch(data.list, keyValue, data.comparator);
        if (idx < 0)
        {
            data.list.add(-idx - 1, keyValue); // insert at the insertion point to stay sorted
        }
        else
        {
            data.list.set(idx, keyValue); // overwrite to simulate Hashtable behaviour
        }
        return;
    }

    if (meta.isUsingIntegerPair())
    {
        // Verify once that both key and value are a single Integer column.
        if (!data.metadataVerifiedIntegerPair) {
            data.metadataVerifiedIntegerPair = true;
            if (keyMeta.size() != 1 || valueMeta.size() != 1 || !keyMeta.getValueMeta(0).isInteger() || !valueMeta.getValueMeta(0).isInteger()) {
                throw new KettleValueException(Messages.getString("StreamLookup.Exception.CanNotUseIntegerPairAlgorithm"));
            }
        }
        data.longIndex.put(keyMeta.getInteger(keyData, 0), valueMeta.getInteger(valueData, 0));
        return;
    }

    // Default memory-saving mode: lazily created byte-array hash index.
    if (data.hashIndex == null)
    {
        data.hashIndex = new ByteArrayHashIndex(keyMeta);
    }
    data.hashIndex.put(RowMeta.extractData(keyMeta, keyData), RowMeta.extractData(valueMeta, valueData));
}
示例12: searchNrStepAttributes
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Counts the step attributes with the given code for the given step by
 * scanning the step-attributes buffer from the first matching entry and
 * reading the attribute number column (index 2) of each consecutive match.
 *
 * NOTE(review): assumes stepAttributesBuffer rows are ordered by
 * (step id, code, nr) — TODO confirm against the code that fills the buffer.
 */
private synchronized int searchNrStepAttributes(ObjectId id_step, String code) throws KettleValueException
{
    // Search the index of the first step attribute with the specified code...
    int idx = searchStepAttributeIndexInBuffer(id_step, code, 0L);
    if (idx < 0) return 0; // no attribute with this code for this step

    int nr = 1;
    int offset = 1;
    if (idx + offset >= stepAttributesBuffer.size())
    {
        // Only 1, the last of the attributes buffer.
        return 1;
    }

    // PERF FIX: convert the step ID once, instead of allocating a new
    // LongObjectId on every evaluation of the loop condition.
    long stepId = new LongObjectId(id_step).longValue();

    // Walk forward while rows still belong to the same (step, code) pair:
    // column 0 = step id, column 1 = attribute code, column 2 = attribute nr.
    Object[] look = (Object[]) stepAttributesBuffer.get(idx + offset);
    RowMetaInterface rowMeta = stepAttributesRowMeta;
    long lookID = rowMeta.getInteger(look, 0);
    String lookCode = rowMeta.getString(look, 1);
    while (lookID == stepId && code.equalsIgnoreCase(lookCode))
    {
        // Find the maximum attribute number seen so far.
        nr = rowMeta.getInteger(look, 2).intValue() + 1;
        offset++;
        if (idx + offset < stepAttributesBuffer.size())
        {
            look = (Object[]) stepAttributesBuffer.get(idx + offset);
            lookID = rowMeta.getInteger(look, 0);
            lookCode = rowMeta.getString(look, 1);
        }
        else
        {
            // Ran off the end of the buffer; nr already holds the count.
            return nr;
        }
    }
    return nr;
}
開發者ID:yintaoxue,項目名稱:read-open-source-code,代碼行數:43,代碼來源:KettleDatabaseRepositoryConnectionDelegate.java
示例13: addToCache
import org.pentaho.di.core.row.RowMetaInterface; //導入方法依賴的package包/類
/**
 * Stores a key/value row pair in whichever lookup structure the step is
 * configured to use: a plain map, a sorted list, a Long/Long index, or a
 * byte-array hash index.
 */
private void addToCache(RowMetaInterface keyMeta, Object[] keyData, RowMetaInterface valueMeta, Object[] valueData) throws KettleValueException
{
    // Plain mode: wrap key+meta in RowMetaAndData so equals()/hashCode()
    // compare row contents (a bare Object[] would use identity).
    if (!meta.isMemoryPreservationActive())
    {
        data.look.put(new RowMetaAndData(keyMeta, keyData), valueData);
        return;
    }

    if (meta.isUsingSortedList())
    {
        // Keep data.list sorted; an exact key match is overwritten in place.
        KeyValue keyValue = new KeyValue(keyData, valueData);
        int idx = Collections.binarySearch(data.list, keyValue, data.comparator);
        if (idx < 0)
        {
            data.list.add(-idx - 1, keyValue); // insert at the insertion point to stay sorted
        }
        else
        {
            data.list.set(idx, keyValue); // overwrite to simulate Hashtable behaviour
        }
        return;
    }

    if (meta.isUsingIntegerPair())
    {
        // Verify once that both key and value are a single Integer column.
        if (!data.metadataVerifiedIntegerPair) {
            data.metadataVerifiedIntegerPair = true;
            if (keyMeta.size() != 1 || valueMeta.size() != 1 || !keyMeta.getValueMeta(0).isInteger() || !valueMeta.getValueMeta(0).isInteger()) {
                throw new KettleValueException(BaseMessages.getString(PKG, "StreamLookup.Exception.CanNotUseIntegerPairAlgorithm"));
            }
        }
        data.longIndex.put(keyMeta.getInteger(keyData, 0), valueMeta.getInteger(valueData, 0));
        return;
    }

    // Default memory-saving mode: lazily created byte-array hash index.
    if (data.hashIndex == null)
    {
        data.hashIndex = new ByteArrayHashIndex(keyMeta);
    }
    data.hashIndex.put(RowMeta.extractData(keyMeta, keyData), RowMeta.extractData(valueMeta, valueData));
}