This article collects typical usage examples of the Java method org.pentaho.di.core.vfs.KettleVFS.createTempFile. If you are wondering what KettleVFS.createTempFile does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the containing class, org.pentaho.di.core.vfs.KettleVFS.
Six code examples of KettleVFS.createTempFile follow, ordered by popularity.
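Before the examples, here is a minimal sketch of a direct call, assuming the Kettle environment has been initialized; the prefix, suffix, and directory values are illustrative:

// Creates a uniquely named temp file with the given prefix and suffix in the
// given directory, returning a Commons VFS FileObject handle to it.
FileObject tmp = KettleVFS.createTempFile("example", ".tmp", System.getProperty("java.io.tmpdir"));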
Example 1: createFileAppender
import org.pentaho.di.core.vfs.KettleVFS; // import the package/class the method depends on
/**
 * Create a file appender.
 *
 * @param filename The (VFS) filename (URL) to write to.
 * @param exact whether filename is an exact filename, or a prefix for a temporary file created in "java.io.tmpdir"
 * @return A new file appender
 * @throws KettleFileException In case there is a problem opening the file.
 */
public static final Log4jFileAppender createFileAppender(String filename, boolean exact) throws KettleFileException
{
    try
    {
        FileObject file;
        if (!exact)
        {
            file = KettleVFS.createTempFile(filename, ".log", System.getProperty("java.io.tmpdir"));
        }
        else
        {
            file = KettleVFS.getFileObject(filename);
        }

        Log4jFileAppender appender = new Log4jFileAppender(file);
        appender.setLayout(new Log4jKettleLayout(true));
        appender.setName(LogWriter.createFileAppenderName(filename, exact));

        return appender;
    }
    catch (IOException e)
    {
        throw new KettleFileException("Unable to add Kettle file appender to Log4J", e);
    }
}
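For context, a hypothetical call site (the "pdi_log" prefix is illustrative, not from the source): with exact == false, the name is used as a temp-file prefix and the appender writes to a generated .log file under java.io.tmpdir.

// Hypothetical call from within the declaring class.
Log4jFileAppender appender = createFileAppender("pdi_log", false);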
Example 2: prepareJarFiles
import org.pentaho.di.core.vfs.KettleVFS; // import the package/class the method depends on
private URL[] prepareJarFiles(FileObject zipFile) throws Exception {
    // Look inside the zip, e.g. zip:file:///tmp/foo.zip
    FileInputList fileList = FileInputList.createFileList(this,
        new String[] { "zip:" + zipFile.toString(), },
        new String[] { ".*\\.jar$", },          // Include mask: only jar files
        new String[] { ".*classpath\\.jar$", }, // Exclude mask: skip the classpath jar
        new String[] { "Y", },                  // File required
        new boolean[] { true, }                 // Search sub-directories
    );

    List<URL> files = new ArrayList<URL>();

    // Copy the jar files to the temp folder...
    //
    for (FileObject file : fileList.getFiles()) {
        FileObject jarfilecopy = KettleVFS.createTempFile(file.getName().getBaseName(), ".jar", environmentSubstitute("${java.io.tmpdir}"));
        jarfilecopy.copyFrom(file, new AllFileSelector());
        files.add(jarfilecopy.getURL());
    }

    return files.toArray(new URL[files.size()]);
}
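The returned URLs would typically feed a class loader. A minimal sketch, assuming the copied jars should be loaded in their own class loader that delegates to the current one (the variable names are illustrative):

// Load the jars copied to the temp folder through a dedicated class loader.
URL[] jarUrls = prepareJarFiles(zipFile);
URLClassLoader jarLoader = new URLClassLoader(jarUrls, getClass().getClassLoader());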
Example 3: addBuffer
import org.pentaho.di.core.vfs.KettleVFS; // import the package/class the method depends on
private boolean addBuffer(RowMetaInterface rowMeta, Object[] r)
{
    if (r != null)
    {
        data.buffer.add(r); // Save row
    }

    // Time to write to disk: buffer in core is full!
    if (data.buffer.size() == meta.getCacheSize() // Buffer is full: dump to disk
        || (data.files.size() > 0 && r == null && data.buffer.size() > 0)) // No more records: join from disk
    {
        // Then write them to disk...
        DataOutputStream dos;
        GZIPOutputStream gzos;
        int p;

        try
        {
            FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp", environmentSubstitute(meta.getDirectory()));
            data.files.add(fileObject); // Remember the files!

            OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
            if (meta.getCompress())
            {
                gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
                dos = new DataOutputStream(gzos);
            }
            else
            {
                dos = new DataOutputStream(outputStream);
                gzos = null;
            }

            // How many records do we have?
            dos.writeInt(data.buffer.size());

            for (p = 0; p < data.buffer.size(); p++)
            {
                // Just write the data, nothing else
                rowMeta.writeData(dos, (Object[]) data.buffer.get(p));
            }

            // Close temp-file
            dos.close(); // close data stream
            if (gzos != null)
            {
                gzos.close(); // close gzip stream
            }
            outputStream.close(); // close file stream
        }
        catch (Exception e)
        {
            logError("Error processing tmp-file: " + e.toString());
            return false;
        }

        data.buffer.clear();
    }

    return true;
}
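The counterpart of this spill-to-disk step is reading the rows back when the buffers are merged. A minimal read-back sketch for the uncompressed case, assuming RowMetaInterface.readData is used as the mirror of writeData:

// Read one spooled buffer back from a temp file (uncompressed case).
DataInputStream dis = new DataInputStream(new BufferedInputStream(KettleVFS.getInputStream(fileObject)));
int count = dis.readInt(); // the record count was written first
for (int i = 0; i < count; i++)
{
    Object[] row = rowMeta.readData(dis); // mirror of rowMeta.writeData()
    // ... hand the row to the merge logic ...
}
dis.close();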
Example 4: addBuffer
import org.pentaho.di.core.vfs.KettleVFS; // import the package/class the method depends on
private boolean addBuffer(RowMetaInterface rowMeta, Object[] r) throws KettleException {
    if (r != null) {
        data.buffer.add(r);
        data.freeCounter++;

        boolean doWrite = data.buffer.size() == data.writeSize; // Buffer is full: dump to disk
        doWrite |= data.files.size() > 0 && r == null && data.buffer.size() > 0; // No more records: join from disk
        doWrite |= data.freeMemoryPctLimit > 0 && data.freeMemoryPct < data.freeMemoryPctLimit && data.buffer.size() >= data.minWriteSize;

        if (doWrite) {
            DataOutputStream dos;
            GZIPOutputStream gzos;
            try {
                // Note: the directory argument resolves the UI message key "System.Button.Browse",
                // which looks unintended in the original source.
                FileObject fileObject = KettleVFS.createTempFile("spatialAnalysis", ".tmp", environmentSubstitute(Messages.getString("System.Button.Browse")));
                data.files.add(fileObject); // Remember the files!

                OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
                if (data.compressFiles) {
                    gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
                    dos = new DataOutputStream(gzos);
                } else {
                    dos = new DataOutputStream(new BufferedOutputStream(outputStream, 50000));
                    gzos = null;
                }

                // How many records do we have left?
                data.bufferSizes.add(data.buffer.size());

                for (int p = 0; p < data.buffer.size(); p++) {
                    data.outputRowMeta.writeData(dos, data.buffer.get(p));
                }

                if (data.writeSize < 0 && data.buffer.size() > data.minWriteSize) {
                    data.minWriteSize = data.buffer.size(); // if we did it once, we can do it again.

                    // Memory usage goes up over time, even with garbage collection.
                    // We need pointers, file handles, etc.
                    // As such, we're going to lower the min sort size a bit.
                    data.minWriteSize = (int) Math.round((double) data.minWriteSize * 0.90);
                }

                // Clear the list
                data.buffer.clear();

                // Close temp-file
                dos.close(); // close data stream
                if (gzos != null) {
                    gzos.close(); // close gzip stream
                }
                outputStream.close(); // close file stream

                // How much memory do we have left?
                data.freeMemoryPct = Const.getPercentageFreeMemory();
                data.freeCounter = 0;

                if (data.writeSize <= 0 && log.isDetailed()) {
                    logDetailed("Available memory : " + data.freeMemoryPct + "%");
                }
            } catch (Exception e) {
                throw new KettleException("Error processing temp-file!", e);
            }

            data.getBufferIndex = 0;
        }

        return true;
    }

    return false;
}
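Compared with Example 3, this variant flushes on three conditions rather than one: the buffer reaching a fixed write size, end of input while spill files are pending, and free JVM memory dropping below a configured percentage. After a flush it also lowers minWriteSize by ten percent, on the reasoning that pointers and file handles accumulated over the run leave less usable headroom.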
Example 5: addBuffer
import org.pentaho.di.core.vfs.KettleVFS; // import the package/class the method depends on
private boolean addBuffer(RowMetaInterface rowMeta, Object[] r)
{
    if (r != null)
    {
        data.buffer.add(r); // Save row
    }

    // Time to write to disk: buffer in core is full!
    if (data.buffer.size() == meta.getCacheSize() // Buffer is full: dump to disk
        || (data.files.size() > 0 && r == null && data.buffer.size() > 0)) // No more records: join from disk
    {
        // Then write them to disk...
        DataOutputStream dos;
        GZIPOutputStream gzos;
        int p;

        try
        {
            FileObject fileObject = KettleVFS.createTempFile(meta.getPrefix(), ".tmp", environmentSubstitute(meta.getDirectory()), getTransMeta());
            data.files.add(fileObject); // Remember the files!

            OutputStream outputStream = KettleVFS.getOutputStream(fileObject, false);
            if (meta.getCompress())
            {
                gzos = new GZIPOutputStream(new BufferedOutputStream(outputStream));
                dos = new DataOutputStream(gzos);
            }
            else
            {
                dos = new DataOutputStream(outputStream);
                gzos = null;
            }

            // How many records do we have?
            dos.writeInt(data.buffer.size());

            for (p = 0; p < data.buffer.size(); p++)
            {
                // Just write the data, nothing else
                rowMeta.writeData(dos, (Object[]) data.buffer.get(p));
            }

            // Close temp-file
            dos.close(); // close data stream
            if (gzos != null)
            {
                gzos.close(); // close gzip stream
            }
            outputStream.close(); // close file stream
        }
        catch (Exception e)
        {
            logError("Error processing tmp-file: " + e.toString());
            return false;
        }

        data.buffer.clear();
    }

    return true;
}
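This example matches Example 3 except for the overload used: createTempFile is additionally passed getTransMeta(), which appears to supply the variable space in which the configured directory is resolved.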
Example 6: run
import org.pentaho.di.core.vfs.KettleVFS; // import the package/class the method depends on
public void run() throws Exception {
    FileObject tempFile = KettleVFS.createTempFile("datacleaner", ".kettlestream", System.getProperty("java.io.tmpdir"));
    filename = KettleVFS.getFilename(tempFile);
    outputStream = new DataOutputStream(KettleVFS.getOutputStream(tempFile, false));
    log.logBasic("DataCleaner temp file created: " + filename);

    RowMetaInterface rowMeta = transMeta.getStepFields(stepMeta);
    log.logBasic("Opened an output stream to DataCleaner.");

    // Write the transformation name, the step name and the row metadata first...
    //
    outputStream.writeUTF(transMeta.getName());
    log.logBasic("wrote the transformation name.");
    outputStream.writeUTF(stepMeta.getName());
    log.logBasic("wrote the step name.");
    rowMeta.writeMeta(outputStream);
    log.logBasic("Wrote the row metadata");

    // Add a row listener to the selected step...
    //
    List<StepInterface> steps = trans.findBaseSteps(stepMeta.getName());

    // Just do one step copy for the time being...
    //
    StepInterface step = steps.get(0);
    step.addRowListener(this);
    log.logBasic("Added the row listener to step: " + step.toString());

    // Now start the transformation...
    //
    trans.startThreads();
    log.logBasic("Started the transformation to profile... waiting until the transformation has finished");
    trans.waitUntilFinished();
    log.logBasic("The transformation to profile finished.");
}
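The original does not show any cleanup for the temp file. A minimal sketch of what a consumer might do once DataCleaner has read the stream, assuming tempFile and outputStream are still reachable (not taken from the source):

// Hypothetical cleanup after the consumer is done with the stream.
outputStream.close();
if (tempFile.exists()) {
    tempFile.delete(); // remove the .kettlestream temp file
}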