本文整理汇总了Java中org.pentaho.di.core.vfs.KettleVFS.getOutputStream方法的典型用法代码示例。如果您正苦于以下问题:Java KettleVFS.getOutputStream方法的具体用法?Java KettleVFS.getOutputStream怎么用?Java KettleVFS.getOutputStream使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.pentaho.di.core.vfs.KettleVFS
的用法示例。
在下文中一共展示了KettleVFS.getOutputStream方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: saveToFile
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Serializes every shared object in {@code objectsMap} to the XML file referenced
 * by {@code filename} (an XML document rooted at {@code XML_TAG}).
 * <p>
 * The streams are closed in a finally block so the VFS file handle is released
 * even when serializing one of the shared objects throws.
 *
 * @throws IOException     if the output stream cannot be created
 * @throws KettleException if one of the shared objects fails to produce its XML
 */
public void saveToFile() throws IOException, KettleException
{
    OutputStream outputStream = KettleVFS.getOutputStream(filename, false);
    PrintStream out = new PrintStream(outputStream);
    try
    {
        out.print(XMLHandler.getXMLHeader(Const.XML_ENCODING));
        out.println("<"+XML_TAG+">");
        for (SharedObjectInterface sharedObject : objectsMap.values())
        {
            out.println(sharedObject.getXML());
        }
        out.println("</"+XML_TAG+">");
        out.flush();
    }
    finally
    {
        // Closing the PrintStream also closes the wrapped VFS output stream.
        out.close();
    }
}
示例2: LogChannelFileWriter
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Create a new log channel file writer
 * @param logChannelId The log channel (+children) to write to the log file
 * @param logFile The logging file to write to
 * @param appending set to true if you want to append to an existing file
 * @param pollingInterval The polling interval in milliseconds.
 *
 * @throws KettleException in case the specified log file can't be created.
 */
public LogChannelFileWriter(String logChannelId, FileObject logFile, boolean appending, int pollingInterval) throws KettleException {
this.logChannelId = logChannelId;
this.logFile = logFile;
this.appending = appending;
this.pollingInterval = pollingInterval;
// Writer starts inactive; presumably a start/stop method toggles this — TODO confirm.
active = new AtomicBoolean(false);
// Remember where the central log buffer currently ends, so only lines appended
// after construction are written to the file.
lastBufferLineNr = CentralLogStore.getLastBufferLineNr();
try {
// Open the VFS output stream eagerly so an unwritable path fails fast here.
logFileOutputStream = KettleVFS.getOutputStream(logFile, appending);
} catch(IOException e) {
throw new KettleException("There was an error while trying to open file '"+logFile+"' for writing", e);
}
}
示例3: PrepareFile
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Opens the target cube file and wires up the output stream chain used to
 * write rows: VFS file stream -> gzip stream -> data output stream, stored
 * on {@code data} for later use.
 *
 * @throws IOException if the file cannot be opened for writing
 */
private void PrepareFile() throws IOException
{
    String targetName = environmentSubstitute(meta.getFilename());

    if (meta.isAddToResultFiles())
    {
        // Register the file in the transformation's result file list.
        ResultFile resultFile =
            new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(targetName),
                getTransMeta().getName(), getStepname());
        resultFile.setComment("This file was created with a cube file output step");
        addResultFile(resultFile);
    }

    // Build the stream chain: raw file -> gzip -> typed data output.
    data.fos = KettleVFS.getOutputStream(targetName, false);
    data.zip = new GZIPOutputStream(data.fos);
    data.dos = new DataOutputStream(data.zip);
}
示例4: prepareFile
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Opens the target cube file (resolving the filename against the transformation's
 * variable space) and stores the stream chain — VFS file stream, gzip stream,
 * data output stream — on {@code data}.
 *
 * @throws KettleFileException wrapping any error that occurs while opening the file
 */
private void prepareFile() throws KettleFileException
{
    try {
        String targetName = environmentSubstitute(meta.getFilename());

        if (meta.isAddToResultFiles())
        {
            // Register the file in the transformation's result file list.
            ResultFile resultFile =
                new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(targetName, getTransMeta()),
                    getTransMeta().getName(), getStepname());
            resultFile.setComment("This file was created with a cube file output step");
            addResultFile(resultFile);
        }

        // Build the stream chain: raw file -> gzip -> typed data output.
        data.fos = KettleVFS.getOutputStream(targetName, getTransMeta(), false);
        data.zip = new GZIPOutputStream(data.fos);
        data.dos = new DataOutputStream(data.zip);
    }
    catch(Exception e) {
        throw new KettleFileException(e);
    }
}
示例5: saveTrans
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Saves a transformation as an XML file at the given name/location.
 * <p>
 * NOTE(review): {@code name} is used directly as the VFS filename and
 * {@code directoryPath} is ignored here — confirm callers always pass a full
 * path in {@code name}.
 * The stream is closed in a finally block so the file handle is released even
 * when the write fails.
 *
 * @throws KettleException wrapping any I/O error during the write
 */
@Override
public void saveTrans(TransMeta transMeta, String name, String directoryPath, Repository repository, IMetaStore metaStore, VariableSpace variableSpace) throws KettleException {
    String xml = XMLHandler.getXMLHeader() + transMeta.getXML();
    DataOutputStream dos = new DataOutputStream(KettleVFS.getOutputStream(name, false));
    try {
        dos.write(xml.getBytes(Const.XML_ENCODING));
    } catch (IOException e) {
        throw new KettleException(e);
    } finally {
        try {
            dos.close(); // always release the file handle, even on a failed write
        } catch (IOException e) {
            throw new KettleException(e);
        }
    }
}
示例6: saveMeta
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Saves the given engine metadata (a job, from the visible delegate check) as an
 * XML file and updates the "last opened files" bookkeeping and UI state.
 * <p>
 * The output stream is closed in a finally block so the file handle no longer
 * leaks when the write itself fails (the broad catch below previously left it open).
 *
 * @param meta  the metadata to serialize
 * @param fname the target filename
 * @return true when the file was written successfully, false otherwise
 */
public boolean saveMeta(EngineMetaInterface meta, String fname) {
    meta.setFilename(fname);
    if (Const.isEmpty(meta.getName()) || delegates.jobs.isDefaultJobName(meta.getName())) {
        // No usable name yet: derive one from the filename.
        meta.nameFromFilename();
    }

    boolean saved = false;
    try {
        String xml = XMLHandler.getXMLHeader() + meta.getXML();
        DataOutputStream dos = new DataOutputStream(KettleVFS.getOutputStream(fname, false));
        try {
            dos.write(xml.getBytes(Const.XML_ENCODING));
        } finally {
            dos.close(); // release the file handle even when the write fails
        }
        saved = true;

        // Handle last opened files...
        props.addLastFile(meta.getFileType(), fname, null, false, null); //$NON-NLS-1$
        saveSettings();
        addMenuLast();

        if (log.isDebug())
            log.logDebug(toString(), Messages.getString("Spoon.Log.FileWritten") + " [" + fname + "]"); // "File written to"
        meta.setFilename(fname);
        meta.clearChanged();
        setShellText();
    } catch (Exception e) {
        if (log.isDebug())
            log.logDebug(toString(), Messages.getString("Spoon.Log.ErrorOpeningFileForWriting") + e.toString());// "Error opening file for writing! --> "
        new ErrorDialog(shell, Messages.getString("Spoon.Dialog.ErrorSavingFile.Title"), Messages.getString("Spoon.Dialog.ErrorSavingFile.Message") + Const.CR + e.toString(), e);
    }
    return saved;
}
示例7: save
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Saves a repository element to its XML file on disk.
 * <p>
 * The element must implement {@code XMLInterface} (or be a shared object) so it
 * can serialize itself. The output stream is closed in a finally block so the
 * file handle is released even when serialization or the write fails. If the
 * element was previously stored under a different object ID, the old file is
 * deleted to avoid duplicate copies.
 *
 * @param repositoryElement the element to persist
 * @param versionComment    optional comment, appended to the repository log when non-empty
 * @param monitor           progress listener (unused here)
 * @param parentId          parent object ID (unused here)
 * @param used              usage flag (unused here)
 * @throws KettleException when the element cannot be saved
 */
public void save(RepositoryElementInterface repositoryElement, String versionComment, ProgressMonitorListener monitor, ObjectId parentId, boolean used) throws KettleException {
    try {
        if (!(repositoryElement instanceof XMLInterface) && !(repositoryElement instanceof SharedObjectInterface)) {
            throw new KettleException("Class ["+repositoryElement.getClass().getName()+"] needs to implement the XML Interface in order to save it to disk");
        }

        if (!Const.isEmpty(versionComment)) {
            insertLogEntry(versionComment);
        }

        ObjectId objectId = new StringObjectId(calcObjectId(repositoryElement));

        FileObject fileObject = getFileObject(repositoryElement);

        String xml = ((XMLInterface)repositoryElement).getXML();
        OutputStream os = KettleVFS.getOutputStream(fileObject, false);
        try {
            os.write(xml.getBytes(Const.XML_ENCODING));
        } finally {
            os.close(); // release the file handle even when the write fails
        }

        if (repositoryElement instanceof ChangedFlagInterface) {
            ((ChangedFlagInterface)repositoryElement).clearChanged();
        }

        // See if the element was already saved in the repository.
        // If the object ID is different, then we created an extra copy.
        // If so, we need to now remove the old file to prevent us from having multiple copies.
        //
        if (repositoryElement.getObjectId()!=null && !repositoryElement.getObjectId().equals(objectId)) {
            delObject(repositoryElement.getObjectId());
        }

        repositoryElement.setObjectId(objectId);
    } catch(Exception e) {
        throw new KettleException("Unable to save repository element ["+repositoryElement+"] to XML file : "+calcFilename(repositoryElement), e);
    }
}
示例8: insertLogEntry
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Appends a description line to the repository's log file and returns the log
 * file path wrapped as an object ID.
 * <p>
 * Fixes: the underlying {@code IOException} is now chained into the thrown
 * {@code KettleException} (it was previously dropped), and the stream is closed
 * in a finally block so the handle is released even when the write fails.
 * NOTE(review): {@code getBytes()} uses the platform default charset — confirm
 * whether an explicit encoding (e.g. Const.XML_ENCODING) is intended.
 *
 * @param description the text to append
 * @return a StringObjectId wrapping the log file path
 * @throws KettleException if the log file cannot be written
 */
public ObjectId insertLogEntry(String description) throws KettleException {
    String logfile = calcDirectoryName(null)+LOG_FILE;
    try {
        OutputStream outputStream = KettleVFS.getOutputStream(logfile, true); // true = append
        try {
            outputStream.write(description.getBytes());
        } finally {
            outputStream.close();
        }
        return new StringObjectId(logfile);
    } catch (IOException e) {
        throw new KettleException("Unable to write log entry to file ["+logfile+"]", e);
    }
}
示例9: saveMeta
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Saves the given engine metadata (job or transformation, per the visible
 * delegate checks) as an XML file and updates the "last opened files"
 * bookkeeping and UI state.
 * <p>
 * The output stream is closed in a finally block so the file handle no longer
 * leaks when the write itself fails (the broad catch below previously left it open).
 *
 * @param meta  the metadata to serialize
 * @param fname the target filename
 * @return true when the file was written successfully, false otherwise
 */
public boolean saveMeta(EngineMetaInterface meta, String fname) {
    meta.setFilename(fname);
    if (Const.isEmpty(meta.getName()) || delegates.jobs.isDefaultJobName(meta.getName()) || delegates.trans.isDefaultTransformationName(meta.getName())) {
        // No usable name yet: derive one from the filename.
        meta.nameFromFilename();
    }

    boolean saved = false;
    try {
        String xml = XMLHandler.getXMLHeader() + meta.getXML();
        DataOutputStream dos = new DataOutputStream(KettleVFS.getOutputStream(fname, false));
        try {
            dos.write(xml.getBytes(Const.XML_ENCODING));
        } finally {
            dos.close(); // release the file handle even when the write fails
        }
        saved = true;

        // Handle last opened files...
        props.addLastFile(meta.getFileType(), fname, null, false, null); //$NON-NLS-1$
        saveSettings();
        addMenuLast();

        if (log.isDebug())
            log.logDebug(BaseMessages.getString(PKG, "Spoon.Log.FileWritten") + " [" + fname + "]"); // "File written to"
        meta.setFilename(fname);
        meta.clearChanged();
        setShellText();
    } catch (Exception e) {
        if (log.isDebug())
            log.logDebug(BaseMessages.getString(PKG, "Spoon.Log.ErrorOpeningFileForWriting") + e.toString());// "Error opening file for writing! --> "
        new ErrorDialog(shell, BaseMessages.getString(PKG, "Spoon.Dialog.ErrorSavingFile.Title"), BaseMessages.getString(
            PKG, "Spoon.Dialog.ErrorSavingFile.Message")
            + Const.CR + e.toString(), e);
    }
    return saved;
}
示例10: getOutputStream
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Opens a VFS output stream for the given filename, resolving variables in the
 * provided variable space. Thin delegate to
 * {@code KettleVFS.getOutputStream(String, VariableSpace, boolean)};
 * declared protected, presumably so subclasses/tests can override stream
 * creation — TODO confirm.
 *
 * @param vfsFilename the (possibly variable-containing) VFS filename
 * @param space       variable space used to resolve the filename
 * @param append      true to append to an existing file, false to overwrite
 * @return the opened output stream
 * @throws KettleFileException if the stream cannot be created
 */
protected OutputStream getOutputStream( String vfsFilename, VariableSpace space, boolean append )
throws KettleFileException {
return KettleVFS.getOutputStream( vfsFilename, space, append );
}
示例11: addBuffer
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Adds a row to the in-memory buffer and, when the buffer is full (or on the
 * final {@code r == null} call with spill files already on disk), writes the
 * buffered rows to a new temp file: row count first, then each row's data,
 * optionally gzip-compressed.
 * <p>
 * The temp-file streams are now closed in a finally block, so the file handle
 * no longer leaks when serialization throws mid-write (the error was caught
 * but the streams were previously left open).
 *
 * @param rowMeta describes how to serialize each row
 * @param r       the row to buffer, or null to signal end of input
 * @return true on success, false when writing the temp file failed (logged)
 */
private boolean addBuffer(RowMetaInterface rowMeta, Object[] r)
{
    if (r!=null)
    {
        data.buffer.add(r); // Save row
    }

    // Time to write to disk: buffer in core is full!
    if ( data.buffer.size()==meta.getCacheSize() // Buffer is full: dump to disk
        || (data.files.size()>0 && r==null && data.buffer.size()>0) // No more records: join from disk
        )
    {
        // Then write them to disk...
        try
        {
            FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp", environmentSubstitute(meta.getDirectory()));
            data.files.add(fileObject); // Remember the files!
            OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
            DataOutputStream dos = null;
            try
            {
                if (meta.getCompress())
                {
                    dos = new DataOutputStream(new GZIPOutputStream(new BufferedOutputStream(outputStream)));
                }
                else
                {
                    dos = new DataOutputStream(outputStream);
                }

                // How many records do we have?
                dos.writeInt(data.buffer.size());

                for (int p=0; p<data.buffer.size(); p++)
                {
                    // Just write the data, nothing else
                    rowMeta.writeData(dos, (Object[])data.buffer.get(p));
                }
            }
            finally
            {
                // Closing the outermost stream finishes the gzip trailer (if any)
                // and closes the underlying VFS stream; close the raw stream
                // directly if wrapping itself failed.
                if (dos != null)
                {
                    dos.close();
                }
                else
                {
                    outputStream.close();
                }
            }
        }
        catch(Exception e)
        {
            logError("Error processing tmp-file: "+e.toString());
            return false;
        }

        data.buffer.clear();
    }
    return true;
}
示例12: addBuffer
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Adds a row to the in-memory buffer and spills the buffer to a gzip- or
 * plain-compressed temp file when it reaches the write size, or when free
 * memory drops below the configured percentage limit.
 * <p>
 * NOTE(review): when {@code r == null} this method returns false without
 * flushing — the {@code r == null} term in the doWrite condition below is
 * unreachable because it sits inside the {@code r != null} branch. Compare
 * with the sibling addBuffer implementations, which flush on the final
 * null-row call; confirm this is intended.
 *
 * @param rowMeta row metadata (parameter appears unused; data.outputRowMeta
 *                is used for serialization instead — verify)
 * @param r       the row to buffer, or null to signal end of input
 * @return true when a row was buffered, false otherwise
 * @throws KettleException when writing the temp file fails
 */
private boolean addBuffer(RowMetaInterface rowMeta, Object[] r) throws KettleException{
if (r != null){
data.buffer.add(r);
data.freeCounter++;
// Spill triggers: buffer full; leftover rows with spill files present (dead, see
// NOTE above); or memory pressure with at least minWriteSize rows buffered.
boolean doWrite = data.buffer.size() == data.writeSize; // Buffer is full: dump to disk
doWrite |= data.files.size() > 0 && r == null && data.buffer.size() > 0; // No more records: join from disk
doWrite |= data.freeMemoryPctLimit > 0 && data.freeMemoryPct < data.freeMemoryPctLimit && data.buffer.size() >= data.minWriteSize;
if (doWrite){
DataOutputStream dos;
GZIPOutputStream gzos;
try{
// NOTE(review): the temp directory is taken from the localized UI string
// "System.Button.Browse" (a button label) — this looks like a copy/paste
// bug; confirm the intended directory property.
FileObject fileObject=KettleVFS.createTempFile("spatialAnalysis", ".tmp", environmentSubstitute(Messages.getString("System.Button.Browse")));
data.files.add(fileObject); // Remember the files!
OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
if (data.compressFiles){
gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
dos = new DataOutputStream(gzos);
}else{
dos = new DataOutputStream(new BufferedOutputStream(outputStream, 50000));
gzos = null;
}
// How many records do we have left?
data.bufferSizes.add( data.buffer.size() );
for (int p = 0; p < data.buffer.size(); p++){
data.outputRowMeta.writeData(dos, data.buffer.get(p));
}
if (data.writeSize < 0 && data.buffer.size() > data.minWriteSize){
data.minWriteSize = data.buffer.size(); // if we did it once, we can do it again.
// Memory usage goes up over time, even with garbage collection
// We need pointers, file handles, etc.
// As such, we're going to lower the min sort size a bit
data.minWriteSize = (int)Math.round((double)data.minWriteSize * 0.90);
}
// Clear the list
data.buffer.clear();
// Close temp-file
dos.close(); // close data stream
if (gzos != null)
gzos.close(); // close gzip stream
outputStream.close(); // close file stream
// How much memory do we have left?
data.freeMemoryPct = Const.getPercentageFreeMemory();
data.freeCounter = 0;
if (data.writeSize <= 0 && log.isDetailed())
logDetailed("Available memory : " + data.freeMemoryPct + "%");
}catch(Exception e){
throw new KettleException("Error processing temp-file!", e);
}
data.getBufferIndex=0;
}
return true;
}
return false;
}
示例13: Log4jFileAppender
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Creates a log4j appender that writes to the given VFS file.
 * The file is opened immediately in overwrite mode (append = false).
 *
 * @param file the VFS file to write log output to
 * @throws IOException if the file cannot be opened for writing
 */
public Log4jFileAppender(FileObject file) throws IOException
{
this.file = file;
fileOutputStream = KettleVFS.getOutputStream(file, false);
}
示例14: openNewFile
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Opens the next output file (name from {@code buildFilename()}), optionally
 * registers it in the transformation's result files, and creates the buffered
 * writer on {@code data.writer} — with the step's encoding when one is
 * configured, otherwise the platform default.
 * <p>
 * Fixes: the writer was previously created twice (a default-encoding writer and
 * its BufferedOutputStream were built, logged, and then immediately discarded
 * and rebuilt in the if/else); the redundant construction and duplicate log
 * line are removed.
 *
 * @return true when the file was opened and the writer created, false on error (logged)
 */
public boolean openNewFile()
{
    boolean retval = false;
    data.writer = null;

    try
    {
        String filename = buildFilename();
        if (meta.AddToResult())
        {
            // Add this to the result file names...
            ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(filename, getTransMeta()), getTransMeta().getName(), getStepname());
            resultFile.setComment("This file was created with a text file output step");
            addResultFile(resultFile);
        }

        if(log.isDetailed()) logDetailed("Opening output stream in nocompress mode");
        OutputStream outputStream = KettleVFS.getOutputStream(filename, getTransMeta(), meta.isFileAppended());

        if (!Const.isEmpty(meta.getEncoding()))
        {
            if(log.isBasic()) logDetailed("Opening output stream in encoding: "+meta.getEncoding());
            data.writer = new OutputStreamWriter(new BufferedOutputStream(outputStream, 5000), environmentSubstitute(meta.getEncoding()));
        }
        else
        {
            if(log.isBasic()) logDetailed("Opening output stream in default encoding");
            data.writer = new OutputStreamWriter(new BufferedOutputStream(outputStream, 5000));
        }

        if(log.isDetailed()) logDetailed("Opened new file with name ["+filename+"]");

        data.splitnr++;
        retval = true;
    }
    catch(Exception e)
    {
        logError("Error opening new file : "+e.toString());
    }
    return retval;
}
示例15: addBuffer
import org.pentaho.di.core.vfs.KettleVFS; //导入方法依赖的package包/类
/**
 * Adds a row to the in-memory buffer and, when the buffer is full (or on the
 * final {@code r == null} call with spill files already on disk), writes the
 * buffered rows to a new temp file in the transformation's variable space:
 * row count first, then each row's data, optionally gzip-compressed.
 * <p>
 * The temp-file streams are now closed in a finally block, so the file handle
 * no longer leaks when serialization throws mid-write (the error was caught
 * but the streams were previously left open).
 *
 * @param rowMeta describes how to serialize each row
 * @param r       the row to buffer, or null to signal end of input
 * @return true on success, false when writing the temp file failed (logged)
 */
private boolean addBuffer(RowMetaInterface rowMeta, Object[] r)
{
    if (r!=null)
    {
        data.buffer.add(r); // Save row
    }

    // Time to write to disk: buffer in core is full!
    if ( data.buffer.size()==meta.getCacheSize() // Buffer is full: dump to disk
        || (data.files.size()>0 && r==null && data.buffer.size()>0) // No more records: join from disk
        )
    {
        // Then write them to disk...
        try
        {
            FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp", environmentSubstitute(meta.getDirectory()), getTransMeta());
            data.files.add(fileObject); // Remember the files!
            OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
            DataOutputStream dos = null;
            try
            {
                if (meta.getCompress())
                {
                    dos = new DataOutputStream(new GZIPOutputStream(new BufferedOutputStream(outputStream)));
                }
                else
                {
                    dos = new DataOutputStream(outputStream);
                }

                // How many records do we have?
                dos.writeInt(data.buffer.size());

                for (int p=0; p<data.buffer.size(); p++)
                {
                    // Just write the data, nothing else
                    rowMeta.writeData(dos, (Object[])data.buffer.get(p));
                }
            }
            finally
            {
                // Closing the outermost stream finishes the gzip trailer (if any)
                // and closes the underlying VFS stream; close the raw stream
                // directly if wrapping itself failed.
                if (dos != null)
                {
                    dos.close();
                }
                else
                {
                    outputStream.close();
                }
            }
        }
        catch(Exception e)
        {
            logError("Error processing tmp-file: "+e.toString());
            return false;
        }

        data.buffer.clear();
    }
    return true;
}