本文整理汇总了Java中org.pentaho.di.cluster.SlaveServer类的典型用法代码示例。如果您正苦于以下问题:Java SlaveServer类的具体用法?Java SlaveServer怎么用?Java SlaveServer使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
SlaveServer类属于org.pentaho.di.cluster包,在下文中一共展示了SlaveServer类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: loadXML
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores this job entry's configuration from its XML representation.
 * Reads the common job-entry settings via the superclass, resolves the
 * database connection by name, and then populates every warehouse-manager
 * option from its corresponding tag.
 *
 * @throws KettleXMLException wrapping any failure while reading the node
 */
public void loadXML( Node entryNode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep ) throws KettleXMLException {
  try {
    super.loadXML( entryNode, databases, slaveServers );

    // Look up the shared connection by the name stored in the XML.
    databaseMeta = DatabaseMeta.findDatabase( databases, XMLHandler.getTagValue( entryNode, CONNECTION ) );

    setManagementAction( XMLHandler.getTagValue( entryNode, MANAGEMENT_ACTION ) );

    // Boolean flags are persisted as "Y"/"N" strings.
    setReplace( "Y".equalsIgnoreCase( XMLHandler.getTagValue( entryNode, REPLACE ) ) );
    setFailIfExists( "Y".equalsIgnoreCase( XMLHandler.getTagValue( entryNode, FAIL_IF_EXISTS ) ) );
    setFailIfNotExists( "Y".equalsIgnoreCase( XMLHandler.getTagValue( entryNode, FAIL_IF_NOT_EXISTS ) ) );
    setAutoResume( "Y".equalsIgnoreCase( XMLHandler.getTagValue( entryNode, AUTO_RESUME ) ) );
    setInitiallySuspended( "Y".equalsIgnoreCase( XMLHandler.getTagValue( entryNode, INITIALLY_SUSPENDED ) ) );

    // Warehouse sizing and behaviour options are kept as raw strings.
    setWarehouseName( XMLHandler.getTagValue( entryNode, WAREHOUSE_NAME ) );
    setWarehouseSize( XMLHandler.getTagValue( entryNode, WAREHOUSE_SIZE ) );
    setWarehouseType( XMLHandler.getTagValue( entryNode, WAREHOUSE_TYPE ) );
    setMaxClusterCount( XMLHandler.getTagValue( entryNode, MAX_CLUSTER_COUNT ) );
    setMinClusterCount( XMLHandler.getTagValue( entryNode, MIN_CLUSTER_COUNT ) );
    setAutoSuspend( XMLHandler.getTagValue( entryNode, AUTO_SUSPEND ) );
    setResourceMonitor( XMLHandler.getTagValue( entryNode, RESOURCE_MONITOR ) );
    setComment( XMLHandler.getTagValue( entryNode, COMMENT ) );
  } catch ( KettleXMLException ex ) {
    throw new KettleXMLException( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Error.Exception.UnableLoadXML" ), ex );
  }
}
示例2: loadRep
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores this job entry's configuration from the repository.
 * Mirrors {@code loadXML}: every warehouse-manager option is read back
 * from its job-entry attribute, and the database connection is resolved
 * through the repository's connection lookup.
 *
 * @throws KettleException wrapping any repository read failure
 */
public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List<DatabaseMeta> databases,
  List<SlaveServer> slaveServers ) throws KettleException {
  try {
    // Resolve the shared connection via the repository lookup.
    databaseMeta = rep.loadDatabaseMetaFromJobEntryAttribute( id_jobentry, CONNECTION, "id_database", databases );

    setManagementAction( rep.getJobEntryAttributeString( id_jobentry, MANAGEMENT_ACTION ) );

    // Boolean options come back directly as booleans from the repository.
    setReplace( rep.getJobEntryAttributeBoolean( id_jobentry, REPLACE ) );
    setFailIfExists( rep.getJobEntryAttributeBoolean( id_jobentry, FAIL_IF_EXISTS ) );
    setFailIfNotExists( rep.getJobEntryAttributeBoolean( id_jobentry, FAIL_IF_NOT_EXISTS ) );
    setAutoResume( rep.getJobEntryAttributeBoolean( id_jobentry, AUTO_RESUME ) );
    setInitiallySuspended( rep.getJobEntryAttributeBoolean( id_jobentry, INITIALLY_SUSPENDED ) );

    // String-valued warehouse options.
    setWarehouseName( rep.getJobEntryAttributeString( id_jobentry, WAREHOUSE_NAME ) );
    setWarehouseSize( rep.getJobEntryAttributeString( id_jobentry, WAREHOUSE_SIZE ) );
    setWarehouseType( rep.getJobEntryAttributeString( id_jobentry, WAREHOUSE_TYPE ) );
    setMaxClusterCount( rep.getJobEntryAttributeString( id_jobentry, MAX_CLUSTER_COUNT ) );
    setMinClusterCount( rep.getJobEntryAttributeString( id_jobentry, MIN_CLUSTER_COUNT ) );
    setAutoSuspend( rep.getJobEntryAttributeString( id_jobentry, AUTO_SUSPEND ) );
    setResourceMonitor( rep.getJobEntryAttributeString( id_jobentry, RESOURCE_MONITOR ) );
    setComment( rep.getJobEntryAttributeString( id_jobentry, COMMENT ) );
  } catch ( KettleException ex ) {
    throw new KettleException( BaseMessages.getString( PKG, "SnowflakeWarehouseManager.Error.Exception.UnableLoadRep" )
      + id_jobentry, ex );
  }
}
示例3: loadRep
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "evaluate table content" job entry from the repository:
 * the connection, target table, success condition, row limit and the
 * custom-SQL related flags.
 *
 * @throws KettleException wrapping any database-level repository failure
 */
public void loadRep(Repository rep, ObjectId id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers) throws KettleException {
    try {
        connection = rep.loadDatabaseMetaFromJobEntryAttribute(id_jobentry, "connection", "id_database", databases);

        // Target table coordinates.
        schemaname = rep.getJobEntryAttributeString(id_jobentry, "schemaname");
        tablename = rep.getJobEntryAttributeString(id_jobentry, "tablename");

        // Map the stored code to the success-condition constant; a missing
        // attribute falls back to the empty-string code.
        String conditionCode = Const.NVL(rep.getJobEntryAttributeString(id_jobentry, "success_condition"), "");
        successCondition = getSuccessConditionByCode(conditionCode);

        limit = rep.getJobEntryAttributeString(id_jobentry, "limit");

        // Custom-SQL options.
        iscustomSQL = rep.getJobEntryAttributeBoolean(id_jobentry, "is_custom_sql");
        isUseVars = rep.getJobEntryAttributeBoolean(id_jobentry, "is_usevars");
        isAddRowsResult = rep.getJobEntryAttributeBoolean(id_jobentry, "add_rows_result");
        isClearResultList = rep.getJobEntryAttributeBoolean(id_jobentry, "clear_result_rows");
        customSQL = rep.getJobEntryAttributeString(id_jobentry, "custom_sql");
    } catch (KettleDatabaseException ex) {
        throw new KettleException(BaseMessages.getString(PKG, "JobEntryEvalTableContent.UnableLoadRep", "" + id_jobentry), ex);
    }
}
示例4: insertClusterSlave
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Inserts one row into the cluster/slave association table
 * (R_CLUSTER_SLAVE), linking the given slave server to the given cluster
 * schema under a freshly allocated association id.
 *
 * Fix: replaced the deprecated {@code new Long(...)} boxing constructors
 * with {@code Long.valueOf(...)}, which reuses cached instances and is
 * the recommended boxing API.
 *
 * @param clusterSchema the cluster the slave belongs to
 * @param slaveServer   the slave server to associate
 * @return the newly allocated association id
 * @throws KettleException on any repository/database failure
 */
public synchronized long insertClusterSlave(ClusterSchema clusterSchema, SlaveServer slaveServer) throws KettleException
{
    long id = getNextClusterSlaveID();

    RowMetaAndData table = new RowMetaAndData();
    table.addValue(new ValueMeta(FIELD_CLUSTER_SLAVE_ID_CLUSTER_SLAVE, ValueMetaInterface.TYPE_INTEGER), Long.valueOf(id));
    table.addValue(new ValueMeta(FIELD_CLUSTER_SLAVE_ID_CLUSTER, ValueMetaInterface.TYPE_INTEGER), Long.valueOf(clusterSchema.getId()));
    table.addValue(new ValueMeta(FIELD_CLUSTER_SLAVE_ID_SLAVE, ValueMetaInterface.TYPE_INTEGER), Long.valueOf(slaveServer.getId()));

    // Standard prepare/set/insert/close cycle against the association table.
    database.prepareInsert(table.getRowMeta(), TABLE_R_CLUSTER_SLAVE);
    database.setValuesInsert(table);
    database.insertRow();
    database.closeInsert();

    return id;
}
示例5: loadRep
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the SNMP-trap job entry from the repository: target host/port,
 * trap payload, retry/timeout settings and the SNMPv3 credentials.
 *
 * @throws KettleException when any attribute cannot be read
 */
public void loadRep(Repository rep, ObjectId id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers) throws KettleException {
    try {
        // Target endpoint.
        serverName = rep.getJobEntryAttributeString(id_jobentry, "servername");
        port = rep.getJobEntryAttributeString(id_jobentry, "port");
        targettype = rep.getJobEntryAttributeString(id_jobentry, "targettype");

        // Trap payload.
        oid = rep.getJobEntryAttributeString(id_jobentry, "oid");
        message = rep.getJobEntryAttributeString(id_jobentry, "message");
        comString = rep.getJobEntryAttributeString(id_jobentry, "comstring");

        // Delivery behaviour.
        timeout = rep.getJobEntryAttributeString(id_jobentry, "timeout");
        nrretry = rep.getJobEntryAttributeString(id_jobentry, "nrretry");

        // SNMPv3 credentials.
        user = rep.getJobEntryAttributeString(id_jobentry, "user");
        passphrase = rep.getJobEntryAttributeString(id_jobentry, "passphrase");
        engineid = rep.getJobEntryAttributeString(id_jobentry, "engineid");
    } catch (KettleException ex) {
        throw new KettleException("Unable to load job entry of type 'SNMPTrap' from the repository for id_jobentry=" + id_jobentry, ex);
    }
}
示例6: loadXML
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "delete folders" job entry from XML: the argument-from-previous
 * flag, success condition, folder limit, and the list of folder-name
 * arguments stored under the &lt;fields&gt; element.
 *
 * @throws KettleXMLException wrapping any parse failure
 */
public void loadXML(Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep) throws KettleXMLException {
    try {
        super.loadXML(entrynode, databases, slaveServers);

        argFromPrevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "arg_from_previous"));
        success_condition = XMLHandler.getTagValue(entrynode, "success_condition");
        limit_folders = XMLHandler.getTagValue(entrynode, "limit_folders");

        // Collect one folder name per <field> child of <fields>.
        Node fieldsNode = XMLHandler.getSubNode(entrynode, "fields");
        int fieldCount = XMLHandler.countNodes(fieldsNode, "field");
        arguments = new String[fieldCount];
        for (int idx = 0; idx < fieldCount; idx++) {
            Node fieldNode = XMLHandler.getSubNodeByNr(fieldsNode, "field", idx);
            arguments[idx] = XMLHandler.getTagValue(fieldNode, "name");
        }
    } catch (KettleXMLException ex) {
        throw new KettleXMLException(BaseMessages.getString(PKG, "JobEntryDeleteFolders.UnableToLoadFromXml"), ex);
    }
}
示例7: delSlaveServer
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Deletes a slave server both from the repository (when connected) and from
 * the in-memory slave server list of the given owner, then refreshes the
 * Spoon tree view.
 *
 * Fix: the original removed by {@code indexOf(...)} without checking the
 * result, so a slave server that was no longer present in the list made
 * {@code remove(-1)} throw IndexOutOfBoundsException and skipped the tree
 * refresh. The removal is now guarded.
 *
 * @param hasSlaveServersInterface owner of the slave server list
 * @param slaveServer              the slave server to delete
 * @throws KettleException when the repository deletion fails
 */
public void delSlaveServer(HasSlaveServersInterface hasSlaveServersInterface, SlaveServer slaveServer) throws KettleException
{
    Repository rep = spoon.getRepository();
    if (rep != null && slaveServer.getId() > 0)
    {
        // Remove the slave server from the repository too...
        rep.delSlave(slaveServer.getId());
    }

    // Guard against a stale reference that is no longer in the list.
    int idx = hasSlaveServersInterface.getSlaveServers().indexOf(slaveServer);
    if (idx >= 0)
    {
        hasSlaveServersInterface.getSlaveServers().remove(idx);
    }

    spoon.refreshTree();
}
示例8: loadXML
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "special" (start/dummy) job entry from XML, including its
 * scheduler configuration. Missing numeric tags fall back to 0, and a
 * missing scheduler type falls back to NOSCHEDULING.
 *
 * @throws KettleXMLException wrapping any parse failure
 */
public void loadXML(Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep) throws KettleXMLException {
    try {
        super.loadXML(entrynode, databases, slaveServers);

        // Entry role flags ("Y"/"N" strings in the XML).
        start = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "start"));
        dummy = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "dummy"));
        repeat = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "repeat"));

        // Scheduler settings; Const.toInt supplies the default on bad/missing values.
        setSchedulerType(Const.toInt(XMLHandler.getTagValue(entrynode, "schedulerType"), NOSCHEDULING));
        setIntervalSeconds(Const.toInt(XMLHandler.getTagValue(entrynode, "intervalSeconds"), 0));
        setIntervalMinutes(Const.toInt(XMLHandler.getTagValue(entrynode, "intervalMinutes"), 0));
        setHour(Const.toInt(XMLHandler.getTagValue(entrynode, "hour"), 0));
        setMinutes(Const.toInt(XMLHandler.getTagValue(entrynode, "minutes"), 0));
        setWeekDay(Const.toInt(XMLHandler.getTagValue(entrynode, "weekDay"), 0));
        setDayOfMonth(Const.toInt(XMLHandler.getTagValue(entrynode, "dayOfMonth"), 0));
    } catch (KettleException ex) {
        throw new KettleXMLException("Unable to load job entry of type 'special' from XML node", ex);
    }
}
示例9: loadXML
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the SFTPPUT job entry from XML: connection settings (with the
 * password decrypted when it was stored encrypted), directories, wildcard
 * and post-transfer flags.
 *
 * @throws KettleXMLException wrapping any parse failure
 */
public void loadXML(Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep) throws KettleXMLException {
    try {
        super.loadXML(entrynode, databases, slaveServers);

        // Connection settings.
        serverName = XMLHandler.getTagValue(entrynode, "servername");
        serverPort = XMLHandler.getTagValue(entrynode, "serverport");
        userName = XMLHandler.getTagValue(entrynode, "username");
        // The stored password may be obfuscated; decrypt it if so.
        password = Encr.decryptPasswordOptionallyEncrypted(XMLHandler.getTagValue(entrynode, "password"));

        // Transfer settings.
        sftpDirectory = XMLHandler.getTagValue(entrynode, "sftpdirectory");
        localDirectory = XMLHandler.getTagValue(entrynode, "localdirectory");
        wildcard = XMLHandler.getTagValue(entrynode, "wildcard");

        // Behaviour flags ("Y"/"N" strings in the XML).
        remove = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "remove"));
        copyprevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "copyprevious"));
        addFilenameResut = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "addFilenameResut"));
    } catch (KettleXMLException ex) {
        throw new KettleXMLException("Unable to load job entry of type 'SFTPPUT' from XML node", ex);
    }
}
示例10: loadRep
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "files exist" job entry from the repository: the filename
 * plus the list of file-name arguments stored as repeated "name" attributes.
 *
 * Fix: the original wrapped only {@code dbe.getMessage()} into the new
 * exception's message and dropped the original exception entirely, losing
 * the stack trace. The cause is now chained so the full failure context is
 * preserved.
 *
 * @throws KettleException wrapping any repository read failure
 */
public void loadRep(Repository rep, long id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers) throws KettleException
{
    try
    {
        super.loadRep(rep, id_jobentry, databases, slaveServers);
        filename = rep.getJobEntryAttributeString(id_jobentry, "filename");

        // How many arguments?
        int argnr = rep.countNrJobEntryAttributes(id_jobentry, "name");
        arguments = new String[argnr];

        // Read them all...
        for (int a = 0; a < argnr; a++)
        {
            arguments[a] = rep.getJobEntryAttributeString(id_jobentry, a, "name");
        }
    }
    catch (KettleException dbe)
    {
        // Chain the original exception so its stack trace is not lost.
        throw new KettleException(Messages.getString("JobEntryFilesExist.ERROR_0002_Cannot_Load_Job_From_Repository", "" + id_jobentry, dbe.getMessage()), dbe);
    }
}
示例11: loadXML
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "files exist" job entry from XML: the filename plus the
 * file-name arguments stored under the &lt;fields&gt; element.
 *
 * Fix: the original re-threw a new exception built only from
 * {@code xe.getMessage()} and discarded {@code xe} itself, losing the
 * original stack trace. The cause is now chained.
 *
 * @throws KettleXMLException wrapping any parse failure
 */
public void loadXML(Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep) throws KettleXMLException
{
    try
    {
        super.loadXML(entrynode, databases, slaveServers);
        filename = XMLHandler.getTagValue(entrynode, "filename");

        Node fields = XMLHandler.getSubNode(entrynode, "fields");

        // How many field arguments?
        int nrFields = XMLHandler.countNodes(fields, "field");
        arguments = new String[nrFields];

        // Read them all...
        for (int i = 0; i < nrFields; i++) {
            Node fnode = XMLHandler.getSubNodeByNr(fields, "field", i);
            arguments[i] = XMLHandler.getTagValue(fnode, "name");
        }
    }
    catch (KettleXMLException xe)
    {
        // Chain the original exception so its stack trace is not lost.
        throw new KettleXMLException(Messages.getString("JobEntryFilesExist.ERROR_0001_Cannot_Load_Job_Entry_From_Xml_Node", xe.getMessage()), xe);
    }
}
示例12: loadXML
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "check DB connections" job entry from XML: for each
 * &lt;connection&gt; child of &lt;connections&gt; it resolves the named
 * database, the wait-for value and the wait-time code.
 *
 * Fix: the original re-threw a new exception built only from
 * {@code xe.getMessage()} and discarded {@code xe} itself, losing the
 * original stack trace. The cause is now chained.
 *
 * @throws KettleXMLException wrapping any parse failure
 */
public void loadXML(Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> slaveServers, Repository rep) throws KettleXMLException
{
    try
    {
        super.loadXML(entrynode, databases, slaveServers);

        Node fields = XMLHandler.getSubNode(entrynode, "connections");

        // How many hosts?
        int nrFields = XMLHandler.countNodes(fields, "connection");
        connections = new DatabaseMeta[nrFields];
        waitfors = new String[nrFields];
        waittimes = new int[nrFields];

        // Read them all...
        for (int i = 0; i < nrFields; i++) {
            Node fnode = XMLHandler.getSubNodeByNr(fields, "connection", i);
            String dbname = XMLHandler.getTagValue(fnode, "name");
            connections[i] = DatabaseMeta.findDatabase(databases, dbname);
            waitfors[i] = XMLHandler.getTagValue(fnode, "waitfor");
            // Missing/blank wait-time codes fall back to the empty-code default.
            waittimes[i] = getWaitByCode(Const.NVL(XMLHandler.getTagValue(fnode, "waittime"), ""));
        }
    }
    catch (KettleXMLException xe)
    {
        // Chain the original exception so its stack trace is not lost.
        throw new KettleXMLException(BaseMessages.getString(PKG, "JobEntryCheckDbConnections.ERROR_0001_Cannot_Load_Job_Entry_From_Xml_Node", xe.getMessage()), xe);
    }
}
示例13: loadSlaveServer
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Loads a slave server definition from the repository row identified by the
 * given object id and materializes it as a {@link SlaveServer}.
 *
 * @param id_slave_server repository id of the slave server row
 * @return the populated slave server
 * @throws KettleException when no row exists for the id or the read fails
 */
public SlaveServer loadSlaveServer(ObjectId id_slave_server) throws KettleException {
    RowMetaAndData row = getSlaveServer(id_slave_server);
    if (row == null) {
        throw new KettleDatabaseException(BaseMessages.getString(PKG, "SlaveServer.SlaveCouldNotBeFound", id_slave_server.toString()));
    }

    SlaveServer server = new SlaveServer();
    server.setObjectId(id_slave_server);

    // Basic connection settings.
    server.setName(row.getString(KettleDatabaseRepository.FIELD_SLAVE_NAME, null));
    server.setHostname(row.getString(KettleDatabaseRepository.FIELD_SLAVE_HOST_NAME, null));
    server.setPort(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PORT, null));
    server.setWebAppName(row.getString(KettleDatabaseRepository.FIELD_SLAVE_WEB_APP_NAME, null));

    // Credentials; the stored password may be obfuscated.
    server.setUsername(row.getString(KettleDatabaseRepository.FIELD_SLAVE_USERNAME, null));
    server.setPassword(Encr.decryptPasswordOptionallyEncrypted(row.getString(
        KettleDatabaseRepository.FIELD_SLAVE_PASSWORD, null)));

    // Proxy configuration.
    server.setProxyHostname(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PROXY_HOST_NAME, null));
    server.setProxyPort(row.getString(KettleDatabaseRepository.FIELD_SLAVE_PROXY_PORT, null));
    server.setNonProxyHosts(row.getString(KettleDatabaseRepository.FIELD_SLAVE_NON_PROXY_HOSTS, null));

    server.setMaster(row.getBoolean(KettleDatabaseRepository.FIELD_SLAVE_MASTER, false));
    return server;
}
开发者ID:yintaoxue,项目名称:read-open-source-code,代码行数:25,代码来源:KettleDatabaseRepositorySlaveServerDelegate.java
示例14: loadRep
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the Syslog job entry from the repository: target host/port,
 * syslog facility/priority, message content and formatting flags.
 *
 * @throws KettleException when any attribute cannot be read
 */
public void loadRep(Repository rep, ObjectId id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers) throws KettleException
{
    try
    {
        // Target endpoint.
        serverName = rep.getJobEntryAttributeString(id_jobentry, "servername");
        port = rep.getJobEntryAttributeString(id_jobentry, "port");

        // Syslog classification.
        facility = rep.getJobEntryAttributeString(id_jobentry, "facility");
        priority = rep.getJobEntryAttributeString(id_jobentry, "priority");

        // Message content and formatting.
        message = rep.getJobEntryAttributeString(id_jobentry, "message");
        datePattern = rep.getJobEntryAttributeString(id_jobentry, "datePattern");
        addTimestamp = rep.getJobEntryAttributeBoolean(id_jobentry, "addTimestamp");
        addHostname = rep.getJobEntryAttributeBoolean(id_jobentry, "addHostname");
    }
    catch (KettleException ex)
    {
        throw new KettleException("Unable to load job entry of type 'Syslog' from the repository for id_jobentry=" + id_jobentry, ex);
    }
}
示例15: loadRep
import org.pentaho.di.cluster.SlaveServer; //导入依赖的package包/类
/**
 * Restores the "simple evaluation" job entry from the repository: the value
 * and field types (mapped from their stored codes), the field/variable names,
 * the comparison bounds and the success conditions.
 *
 * @throws KettleException wrapping any repository read failure
 */
public void loadRep(Repository rep, long id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers)
    throws KettleException
{
    try
    {
        super.loadRep(rep, id_jobentry, databases, slaveServers);

        // Coded attributes default to the empty-string code when absent.
        valuetype = getValueTypeByCode(Const.NVL(rep.getJobEntryAttributeString(id_jobentry, "valuetype"), ""));
        fieldtype = getFieldTypeByCode(Const.NVL(rep.getJobEntryAttributeString(id_jobentry, "fieldtype"), ""));
        successcondition = getSuccessConditionByCode(Const.NVL(rep.getJobEntryAttributeString(id_jobentry, "successcondition"), ""));
        successnumbercondition = getSuccessNumberConditionByCode(Const.NVL(rep.getJobEntryAttributeString(id_jobentry, "successnumbercondition"), ""));

        // What to evaluate.
        fieldname = rep.getJobEntryAttributeString(id_jobentry, "fieldname");
        variablename = rep.getJobEntryAttributeString(id_jobentry, "variablename");

        // Comparison parameters.
        mask = rep.getJobEntryAttributeString(id_jobentry, "mask");
        comparevalue = rep.getJobEntryAttributeString(id_jobentry, "comparevalue");
        minvalue = rep.getJobEntryAttributeString(id_jobentry, "minvalue");
        maxvalue = rep.getJobEntryAttributeString(id_jobentry, "maxvalue");
    }
    catch (KettleException ex)
    {
        throw new KettleException(Messages.getString("JobEntrySimple.Error.Exception.UnableLoadRep") + id_jobentry, ex);
    }
}