

Java LogLevel Class Code Examples

This article collects typical usage examples of the Java class com.ibm.streams.operator.logging.LogLevel. If you are wondering what the LogLevel class does, how to use it, or where to find real-world examples of it, the curated class examples below may help.


The LogLevel class belongs to the com.ibm.streams.operator.logging package. Thirteen code examples of the LogLevel class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
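Before diving into the individual examples, the following minimal sketch (not taken from any of the projects below; the class name and message text are hypothetical, for illustration only) shows the common pattern they all share: LogLevel extends java.util.logging.Level, so its constants such as LogLevel.ERROR and LogLevel.WARN can be passed straight to a standard java.util.logging.Logger.

import java.util.logging.Logger;
import com.ibm.streams.operator.logging.LogLevel;

// Hypothetical helper class, for illustration only.
public class LogLevelDemo {
	// LogLevel values plug into a plain java.util.logging.Logger.
	private static final Logger LOGGER = Logger.getLogger(LogLevelDemo.class.getName());

	public void reportFailure(Exception e) {
		// Log at ERROR level with the exception attached, as several examples below do.
		LOGGER.log(LogLevel.ERROR, "operation failed", e);
	}

	public void reportWarning(String detail) {
		// Guard message construction behind a level check, as Example 13 does with LogLevel.INFO.
		if (LOGGER.isLoggable(LogLevel.WARN)) {
			LOGGER.log(LogLevel.WARN, "potential problem: " + detail);
		}
	}
}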

Example 1: checkInputPortSchema

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
@ContextCheck()
public static void checkInputPortSchema(OperatorContextChecker checker) {
	List<StreamingInput<Tuple>> streamingInputs = checker.getOperatorContext().getStreamingInputs();
	if (streamingInputs.size() > 0) {
		StreamSchema inputSchema = streamingInputs.get(0).getStreamSchema();
		if (inputSchema.getAttributeCount() > 1) {
			checker.setInvalidContext(
					Messages.getString("HDFS_DS_INVALID_INPUT_PORT"), 
					null);
		}

		if (inputSchema.getAttribute(0).getType().getMetaType() != MetaType.RSTRING) {
			checker.setInvalidContext(Messages.getString("HDFS_DS_INVALID_ATTRIBUTE", 
					 inputSchema.getAttribute(0).getType().getMetaType()), null);
		}

		ConsistentRegionContext crContext = checker.getOperatorContext().getOptionalContext(
				ConsistentRegionContext.class);
		if (crContext != null) {
			LOGGER.log( LogLevel.WARNING, Messages.getString("HDFS_DS_CONSISTENT_REGION_NOT_SUPPORTED")); 
		}
	}

}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 25, Source: HDFS2DirectoryScan.java

Example 2: run

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
@Override
public void run() {
	try {
		out.write(flushBuffer, 0, bufferPosition);	
		
		if (newline && fNewline.length > 0)
			out.write(fNewline, 0, fNewline.length);
		
		// force HDFS output stream to flush
		if (out instanceof FSDataOutputStream)
		{
			((FSDataOutputStream)out).hflush();
		}
		else {
			out.flush();
		}
	} catch (IOException e) {
		LOGGER.log(LogLevel.ERROR, Messages.getString("HDFS_ASYNC_UNABLE_WRITE_TO_STREAMS"), e); 
	}		
	finally {
		if (isAddBuffer)
			addBuffer();
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 25, Source: AsyncBufferWriter.java

Example 3: initExServiceAndBuffer

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
private void initExServiceAndBuffer(int size, ThreadFactory threadFactory) {

	synchronized (exServiceLock) {
		exService = Executors.newSingleThreadExecutor(threadFactory);
		bufferQueue = new LinkedBlockingQueue<byte[]>(BUFFER_QUEUE_SIZE);
		try {
			for (int i = 0; i < BUFFER_QUEUE_SIZE; i++) {
				bufferQueue.put(new byte[size]);
			}

			// take one buffer, two left in the queue
			buffer = bufferQueue.take();
		} catch (InterruptedException e) {
			LOGGER.log(LogLevel.ERROR,
					Messages.getString("HDFS_ASYNC_INVALID_BUFFER_QUEUE"), e);
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 19, Source: AsyncBufferWriter.java

Example 4: close

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
@Override
public void close() throws IOException {
	synchronized (exServiceLock) {
		if (!isClosed) {
			isClosed = true;

			// shut down the execution service, so no other flush runnable can be scheduled
			// and wait for any flush job currently scheduled or running to finish
			exService.shutdown();
			try {
				exService.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
			} catch (InterruptedException e) {
				LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_ASYNC_SERVICE_SHUTDOWN_INTERRUPTED"), e);
			} finally {
				// do final flushing of buffer
				flushNow();
				out.close();
				bufferQueue.clear();
			}
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 25, Source: AsyncBufferWriter.java

Example 5: flush

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
@Override
public void flush() throws IOException {

	if (buffer.length > 0) {
		synchronized (exServiceLock) {
			FlushRunnable runnable = new FlushRunnable(buffer, true,
					position, false);
			exService.execute(runnable);

			try {
				if (!isClosed)
					buffer = bufferQueue.take();
				position = 0;
			} catch (InterruptedException e) {
				LOGGER.log(LogLevel.ERROR,
						Messages.getString("HDFS_ASYNC_UNABLE_GET_BUFFER_QUEUE"), e); 
			}
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 21, Source: AsyncBufferWriter.java

Example 6: flushAll

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
public void flushAll() throws IOException
{
	synchronized(exServiceLock) {
		// shut down the execution service, so no other flush runnable can be scheduled 
		// and wait for any flush job currently scheduled or running to finish
		exService.shutdown();
		try {
			exService.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
		} catch (InterruptedException e) {
			LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_ASYNC_SERVICE_SHUTDOWN_INTERRUPTED"), e); 
		} finally {

			// do final flushing of buffer
			flushNow();
			
			// after flushing, recreate exService
			initExServiceAndBuffer(size, fThreadFactory);
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 21, Source: AsyncBufferWriter.java

Example 7: process

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
protected void process() throws Exception {
	
	fProcessThreadDone = false;
	if (fInitDelay > 0) {
		try {
			Thread.sleep((long) (fInitDelay * 1000));
		} catch (InterruptedException e) {
			LOGGER.log(LogLevel.INFO, Messages.getString("HDFS_SOURCE_INIT_DELAY_INTERRUPTED")); 
		}
	}
	try {
		if (!shutdownRequested) {
			processFile(fFileName);
		}
	} finally {
		fProcessThreadDone = true;
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 19, Source: HDFS2FileSource.java

Example 8: commandPublish

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
public static void commandPublish(SPLStream commandStream) {
    if (!commandStream.getSchema().equals(Schemas.DEVICE_CMD))
    {
    	l.log(LogLevel.ERROR, Messages.getString("SCHEMA_IS_INVALID"), new Object[]{commandStream.getSchema().getLanguageType()}); //$NON-NLS-1$
        throw new IllegalArgumentException("Schema is invalid: " + commandStream.getSchema().getLanguageType()); //$NON-NLS-1$
    }
    SPL.invokeSink("com.ibm.streamsx.iot::CommandPublish", commandStream, null); //$NON-NLS-1$
}
 
Developer: IBMStreams, Project: streamsx.iot, Lines: 9, Source: IotSPLStreams.java

Example 9: test

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
@Override
public boolean test(T tuple) {
    System.err.println("CrashAt:" + counter + " -- " + new Date());
    if (counter++ == crashAt) {
        Logger.getAnonymousLogger().log(LogLevel.INFO, "Intentional crash!");
        System.err.println("Intentional crash!");
        System.exit(1);
    }
    return true;
}
 
Developer: IBMStreams, Project: streamsx.topology, Lines: 11, Source: CheckpointTest.java

Example 10: addBuffer

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
private void addBuffer() {
	try {					
		if (!isClosed && bufferQueue.size() <= BUFFER_QUEUE_SIZE)
			bufferQueue.put(new byte[size]);
	} catch (InterruptedException e) {
		LOGGER.log(LogLevel.INFO, Messages.getString("HDFS_ASYNC_UNABLE_ADD_TO_QUEUE"), e); 
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 9, Source: AsyncBufferWriter.java

Example 11: checkStrictMode

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
public void checkStrictMode(OperatorContext context) throws Exception {
	boolean checked = false;
	// directory can be empty

	// When a directory parameter is not specified, check if control input
	// port
	// is present. Warn if so, else throw an exception
	if (!context.getParameterNames().contains("directory")) { 
		// if strict mode, directory can be empty if we have an input stream
		if (context.getNumberOfStreamingInputs() == 0) {
			throw new Exception("directory parameter needs to be specified when control input port is not present.");
		} else {
			// warn user that this may be a problem.
			LOGGER.log(LogLevel.WARN,
					Messages.getString("HDFS_DS_NOT_SPECIFIED_DIR_PARAM")); 
			checked = true;
		}
	}
	if (isStrictMode) {
		if (!checked) {
			if (directory.isEmpty()) {
				throw new Exception(Messages.getString("HDFS_DS_EMPTY_DIRECTORY_STRICT_MODE")); 
			} else if (!getHdfsClient().exists(directory)) {
				throw new Exception(Messages.getString("HDFS_DS_DIRECTORY_NOT_EXIST_STRICT_MODE", directory));
			} else if (!getHdfsClient().isDirectory(directory)) {
				throw new Exception(Messages.getString("HDFS_DS_IS_NOT_A_DIRECTORY", directory));
			}
		}
	} else {
		if (!checked) {
			if (directory.isEmpty()) {
				if (context.getNumberOfStreamingInputs() == 1) {
					LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_DS_EMPTY_DIRECTORY_PARAM")); 
					directory = ""; 
				} else {
					throw new Exception(Messages.getString("HDFS_DS_EMPTY_DIRECTORY_NOT_CONTROL_PORT")); 
				}
			} else if (!getHdfsClient().exists(directory)) {
				// TRACE.warning("Directory specified does not exist: " +
				// directory);
				LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_DS_DIRECTORY_NOT_EXIST", directory)); 
			} else if (!getHdfsClient().isDirectory(directory)) {
				if (context.getNumberOfStreamingInputs() == 1) {
					// throw new
					// Exception("directory parameter value "+directory+" does not refer to a valid directory");
					LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_DS_IS_NOT_A_DIRECTORY", directory)); 
					directory = "";// so that it does not break in process 
				} else {
					throw new Exception(Messages.getString("HDFS_DS_IS_NOT_A_DIRECTORY", directory));
				}
			} else {
				try {
					scanDirectory(directory);
				} catch (IOException ex) {
					if (context.getNumberOfStreamingInputs() == 1) {
						LOGGER.log(LogLevel.WARN, ex.getMessage());
						directory = ""; 
					} else {
						throw ex;
					}
				}
			}
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 66, Source: HDFS2DirectoryScan.java

Example 12: process

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
@Override
public void process(StreamingInput<Tuple> stream, Tuple tuple) throws Exception {
	String newDir = tuple.getString(0);
	boolean dirExists = true;

	if (TRACE.isLoggable(TraceLevel.INFO))
		TRACE.log(TraceLevel.INFO, "Control signal received: " + newDir); 

	if (newDir != null) {
		synchronized (dirLock) {

			TRACE.log(TraceLevel.DEBUG, "Acquired dirLock for control signal");

			if (isStrictMode) {
				if (newDir != null && !getHdfsClient().exists(newDir)) {
					dirExists = false;
					throw new Exception("Directory specified from control input port does not exist: " + newDir);
				} else if (newDir != null && !getHdfsClient().isDirectory(newDir)) {
					dirExists = false;
					throw new Exception("Directory specified from control input port is not a valid directory: "
							+ newDir);
				} else if (newDir != null && newDir.isEmpty()) {
					dirExists = false;
					throw new Exception("Directory received from input port is empty.");
				}
			} else {

				if (newDir != null && newDir.isEmpty()) {
					dirExists = false;
					// if directory is empty and number of input port is
					// zero, throw exception
					// warn user that this may be a problem.
					LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_DS_EMPTY_DIRECTORY_INPUT_PORT")); 
				} else if (newDir != null && !getHdfsClient().exists(newDir)) {
					dirExists = false;
					LOGGER.log(LogLevel.WARN, Messages.getString("HDFS_DS_DIRECTORY_NOT_EXIST_INPUT_PORT", newDir));
				} else if (newDir != null && !getHdfsClient().isDirectory(newDir)) {
					dirExists = false;
					LOGGER.log(LogLevel.WARN,
							Messages.getString("HDFS_DS_INVALID_DIRECTORY_INPUT_PORT", newDir)); 
				} else if (newDir != null) {
					try {
						scanDirectory(newDir);
					} catch (IOException e) {
						dirExists = false;
						LOGGER.log(LogLevel.WARN, e.getMessage());
					}
				}
			}

			if (newDir != null && !newDir.isEmpty() && !directory.equals(newDir) && dirExists) {
				setDirectory(newDir);
			}
			// always notify to allow user to send a signal
			// to force a scan immediately.
			dirLock.notify();
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 60, Source: HDFS2DirectoryScan.java

Example 13: processFile

import com.ibm.streams.operator.logging.LogLevel; // import the LogLevel class
private void processFile(String filename) throws Exception {
	if (LOGGER.isLoggable(LogLevel.INFO)) {
		LOGGER.log(LogLevel.INFO, Messages.getString("HDFS_SOURCE_PROCESS_FILE", filename)); 
	}
	IHdfsClient hdfsClient = getHdfsClient();
	
	try {
		if (fCrContext != null) {
			fCrContext.acquirePermit();
		}
		openFile(hdfsClient, filename);

	} finally {
		if (fCrContext != null) {
			fCrContext.releasePermit();
		}
	}
	
	if (fDataStream == null) {
		LOGGER.log(LogLevel.ERROR, Messages.getString("HDFS_SOURCE_NOT_OPENING_FILE", filename)); 
		return;
	}
	
	nFilesOpened.incrementValue(1);
	StreamingOutput<OutputTuple> outputPort = getOutput(0);
	try {
		if (fBinaryFile) {
			doReadBinaryFile(fDataStream, outputPort);
		} else {
			doReadTextFile(fDataStream, outputPort, filename);
		}
	} catch (IOException e) {
		LOGGER.log(LogLevel.ERROR,
				Messages.getString("HDFS_SOURCE_EXCEPTION_READ_FILE"), e.getMessage()); 
	} finally {
		closeFile();
	}
	outputPort.punctuate(Punctuation.WINDOW_MARKER);
	
	if (fCrContext != null && fCrContext.isStartOfRegion() && fCrContext.isTriggerOperator())
	{
		try 
		{
			fCrContext.acquirePermit();					
			fCrContext.makeConsistent();
		}
		finally {
			fCrContext.releasePermit();
		}
	}
}
 
Developer: IBMStreams, Project: streamsx.hdfs, Lines: 52, Source: HDFS2FileSource.java


Note: The com.ibm.streams.operator.logging.LogLevel class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.