This article collects typical usage examples of the Java method com.ibm.streams.operator.StreamSchema.getAttributeCount. If you are unsure what StreamSchema.getAttributeCount does, how to call it, or where to find working code that uses it, the curated examples below should help. You can also explore the enclosing class, com.ibm.streams.operator.StreamSchema, for further detail.
The following presents 8 code examples of StreamSchema.getAttributeCount, sorted by popularity by default.
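Before the full examples, here is a minimal sketch of the method itself: getAttributeCount() returns the number of attributes in a StreamSchema and is typically used either as a validation check or as a loop bound. The printAttributes helper below is hypothetical, written only for illustration; the schema is assumed to come from an operator port.
import com.ibm.streams.operator.Attribute;
import com.ibm.streams.operator.StreamSchema;

// Minimal sketch: iterate over every attribute of a schema, using
// getAttributeCount() as the loop bound. The schema is assumed to be
// obtained elsewhere, e.g. getInput(0).getStreamSchema().
static void printAttributes(StreamSchema schema) {
    for (int i = 0; i < schema.getAttributeCount(); i++) {
        Attribute attr = schema.getAttribute(i);
        System.out.println(attr.getName() + ": " + attr.getType().getMetaType());
    }
}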
Example 1: checkInputPortSchema
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
@ContextCheck()
public static void checkInputPortSchema(OperatorContextChecker checker) {
List<StreamingInput<Tuple>> streamingInputs = checker.getOperatorContext().getStreamingInputs();
if (streamingInputs.size() > 0) {
StreamSchema inputSchema = streamingInputs.get(0).getStreamSchema();
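// the optional input port, when connected, must consist of exactly one rstring attribute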
if (inputSchema.getAttributeCount() > 1) {
checker.setInvalidContext(
Messages.getString("HDFS_DS_INVALID_INPUT_PORT"),
null);
}
if (inputSchema.getAttribute(0).getType().getMetaType() != MetaType.RSTRING) {
checker.setInvalidContext(Messages.getString("HDFS_DS_INVALID_ATTRIBUTE",
inputSchema.getAttribute(0).getType().getMetaType()), null);
}
ConsistentRegionContext crContext = checker.getOperatorContext().getOptionalContext(
ConsistentRegionContext.class);
if (crContext != null) {
LOGGER.log( LogLevel.WARNING, Messages.getString("HDFS_DS_CONSISTENT_REGION_NOT_SUPPORTED"));
}
}
}
Example 2: checkOutputPortSchema
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
@ContextCheck(compile = true)
public static void checkOutputPortSchema(OperatorContextChecker checker) throws Exception {
StreamSchema outputSchema = checker.getOperatorContext().getStreamingOutputs().get(0).getStreamSchema();
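// the output port must consist of exactly one rstring attribute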
if (outputSchema.getAttributeCount() != 1) {
checker.setInvalidContext(
Messages.getString("HDFS_DS_INVALID_OUTPUT_PORT"),
null);
}
if (outputSchema.getAttribute(0).getType().getMetaType() != MetaType.RSTRING) {
checker.setInvalidContext(Messages.getString("HDFS_DS_INVALID_ATTRIBUTE")
+ outputSchema.getAttribute(0).getType().getMetaType(), null);
}
}
Example 3: checkOutputPortSchema
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
@ContextCheck(compile = true)
public static void checkOutputPortSchema(OperatorContextChecker checker)
throws Exception {
StreamSchema outputSchema = checker.getOperatorContext()
.getStreamingOutputs().get(0).getStreamSchema();
// check that number of attributes is 1
if (outputSchema.getAttributeCount() != 1) {
checker.setInvalidContext(
Messages.getString("HDFS_SOURCE_INVALID_OUTPUT"),
null);
}
if (outputSchema.getAttribute(0).getType().getMetaType() != MetaType.RSTRING
&& outputSchema.getAttribute(0).getType().getMetaType() != MetaType.USTRING
&& outputSchema.getAttribute(0).getType().getMetaType() != MetaType.BLOB) {
checker.setInvalidContext(
Messages.getString("HDFS_SOURCE_INVALID_ATTR_TYPE", outputSchema.getAttribute(0).getType().getMetaType()),
null);
}
if (MetaType.BLOB != outputSchema.getAttribute(0).getType()
.getMetaType()
&& checker.getOperatorContext().getParameterNames()
.contains(BLOCKSIZE_PARAM)) {
checker.setInvalidContext(Messages.getString("HDFS_SOURCE_INVALID_BLOCKSIZE_PARAM", "BLOCKSIZE_PARAM"),
null);
}
}
Example 4: checkInputPortSchema
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
@ContextCheck(compile = true)
public static void checkInputPortSchema(OperatorContextChecker checker)
throws Exception {
List<StreamingInput<Tuple>> streamingInputs = checker
.getOperatorContext().getStreamingInputs();
// check that we have max of one input port
if (streamingInputs.size() > 1) {
throw new Exception("HDFSFileSource can only have one input port");
}
// if we have an input port
if (streamingInputs.size() == 1) {
StreamSchema inputSchema = checker.getOperatorContext()
.getStreamingInputs().get(0).getStreamSchema();
// check that number of attributes is 1
if (inputSchema.getAttributeCount() != 1) {
checker.setInvalidContext(
Messages.getString("HDFS_SOURCE_INVALID_FILENAME_ATTR"),
null);
}
// check that the attribute type is rstring
if (MetaType.RSTRING != inputSchema.getAttribute(0).getType()
.getMetaType()) {
checker.setInvalidContext(
Messages.getString("HDFS_SOURCE_INVALID_STRING_ATTR", inputSchema.getAttribute(0).getType().getMetaType()),
null);
}
}
}
Example 5: initialize
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
/**
* Initialize this operator. Called once before any tuples are processed.
*
* @param operatorContext
* OperatorContext for this operator.
* @throws Exception
* Operator failure, will cause the enclosing PE to terminate.
*/
@Override
public synchronized void initialize(OperatorContext operatorContext) throws Exception {
// Must call super.initialize(context) to correctly setup an operator.
super.initialize(operatorContext);
LOGGER.log(TraceLevel.TRACE, "Operator " + operatorContext.getName() + " initializing in PE: "
+ operatorContext.getPE().getPEId() + " in Job: " + operatorContext.getPE().getJobId());
StreamSchema ssOp0 = getOutput(0).getStreamSchema();
StreamSchema ssIp0 = getInput(0).getStreamSchema();
// If no input JSON attribute specified, use default
if (inputJsonMessage == null) {
if (ssIp0.getAttributeCount() == 1) {
inputJsonMessage = ssIp0.getAttribute(0).getName();
} else {
inputJsonMessage = DEFAULT_INPUT_JSON_MSG_ATTRIBUTE;
}
}
LOGGER.log(TraceLevel.TRACE, "Input JSON message attribute: " + inputJsonMessage);
// If no output Avro message attribute specified, use default
if (outputAvroMessage == null) {
if (ssOp0.getAttributeCount() == 1) {
outputAvroMessage = ssOp0.getAttribute(0).getName();
} else {
outputAvroMessage = DEFAULT_OUTPUT_AVRO_MSG_ATTRIBUTE;
}
}
LOGGER.log(TraceLevel.TRACE, "Output Avro message attribute: " + outputAvroMessage);
// Get the Avro schema file to parse the Avro messages
LOGGER.log(TraceLevel.TRACE, "Retrieving and parsing Avro message schema file " + avroMessageSchemaFile);
InputStream avscInput = new FileInputStream(avroMessageSchemaFile);
Schema.Parser parser = new Schema.Parser();
messageSchema = parser.parse(avscInput);
LOGGER.log(TraceLevel.TRACE, "Embed Avro schema in generated output Avro message block: " + embedAvroSchema);
LOGGER.log(TraceLevel.TRACE, "Submit Avro message block when punctuation is received: " + submitOnPunct);
LOGGER.log(TraceLevel.TRACE, "Ignore parsing error: " + ignoreParsingError);
// submitOnPunct is only valid if Avro schema is embedded in the output
if (submitOnPunct && !embedAvroSchema)
throw new Exception(
"Parameter submitOnPunct can only be set to true if Avro schema is embedded in the output.");
// If the Avro schema is embedded in the output, at least one submission threshold is mandatory
if (embedAvroSchema && !submitOnPunct && tuplesPerMessage == 0 && bytesPerMessage == 0 && timePerMessage == 0)
throw new Exception("If Avro schema is embedded in the output, you must specify one of the thresholds when "
+ "the tuple must be submitted (submitOnPunct, bytesPerMessage, timePerMessage, tuplesPerMessage).");
// Prepare and initialize variables that don't change for every input
// record
jsonReader = new GenericDatumReader<GenericRecord>(messageSchema);
avroWriter = new GenericDatumWriter<GenericRecord>(messageSchema);
avroDataFileWriter = new DataFileWriter<GenericRecord>(avroWriter);
if (embedAvroSchema)
avroDataFileWriter.create(messageSchema, avroBlockByteArray);
numberOfBatchedMessages = 0;
LOGGER.log(TraceLevel.TRACE, "JSONToAvro operator initialized, ready to receive tuples");
}
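The single-attribute defaulting seen above (and again in Examples 6 and 7) is a small reusable idiom: when a port's schema has exactly one attribute, that attribute's name is unambiguous and wins; otherwise a conventional default name is used. A hedged sketch of it, factored into a helper with a hypothetical name, might look like this:
// Hypothetical helper (not part of the toolkit): pick the default
// attribute name for a port. A single-attribute schema is unambiguous,
// so its attribute name wins; otherwise use the conventional default.
private static String defaultAttributeName(StreamSchema schema, String fallbackName) {
    if (schema.getAttributeCount() == 1) {
        return schema.getAttribute(0).getName();
    }
    return fallbackName;
}
With this helper, the defaulting blocks above reduce to one-liners such as inputJsonMessage = defaultAttributeName(ssIp0, DEFAULT_INPUT_JSON_MSG_ATTRIBUTE);.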
Example 6: initialize
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
/**
* Initialize this operator. Called once before any tuples are processed.
*
* @param operatorContext
* OperatorContext for this operator.
* @throws Exception
* Operator failure, will cause the enclosing PE to terminate.
*/
@Override
public synchronized void initialize(OperatorContext operatorContext) throws Exception {
// Must call super.initialize(context) to correctly setup an operator.
super.initialize(operatorContext);
LOGGER.log(TraceLevel.TRACE, "Operator " + operatorContext.getName() + " initializing in PE: "
+ operatorContext.getPE().getPEId() + " in Job: " + operatorContext.getPE().getJobId());
StreamSchema ssOp0 = getOutput(0).getStreamSchema();
StreamSchema ssIp0 = getInput(0).getStreamSchema();
// If no input Avro message blob attribute specified, use default
if (inputAvroMessage == null) {
if (ssIp0.getAttributeCount() == 1) {
inputAvroMessage = ssIp0.getAttribute(0).getName();
} else {
inputAvroMessage = DEFAULT_INPUT_AVRO_MSG_ATTRIBUTE;
}
}
LOGGER.log(TraceLevel.TRACE, "Input Avro message attribute: " + inputAvroMessage);
// If no Avro key attribute specified, check if optional attribute is
// available in the input tuple
if (inputAvroKey == null) {
if (ssIp0.getAttribute(DEFAULT_INPUT_AVRO_KEY_ATTRIBUTE) != null)
inputAvroKey = DEFAULT_INPUT_AVRO_KEY_ATTRIBUTE;
}
if (inputAvroKey != null)
LOGGER.log(TraceLevel.TRACE, "Input Avro key attribute: " + inputAvroKey);
// If no output JSON message attribute specified, use default
if (outputJsonMessage == null) {
if (ssOp0.getAttributeCount() == 1) {
outputJsonMessage = ssOp0.getAttribute(0).getName();
} else {
outputJsonMessage = DEFAULT_OUTPUT_JSON_MSG_ATTRIBUTE;
}
}
LOGGER.log(TraceLevel.TRACE, "Output JSON message attribute: " + outputJsonMessage);
// If no JSON key attribute specified, check if optional attribute is
// available in the output tuple
if (outputJsonKey == null) {
if (ssOp0.getAttribute(DEFAULT_OUTPUT_JSON_KEY_ATTRIBUTE) != null)
outputJsonKey = DEFAULT_OUTPUT_JSON_KEY_ATTRIBUTE;
}
if (outputJsonKey != null)
LOGGER.log(TraceLevel.TRACE, "Output JSON key attribute: " + outputJsonKey);
// Get the Avro message schema file to parse the Avro messages
if (!avroMessageSchemaFile.isEmpty()) {
LOGGER.log(TraceLevel.TRACE, "Retrieving and parsing Avro message schema file " + avroMessageSchemaFile);
InputStream avscMessageInput = new FileInputStream(avroMessageSchemaFile);
messageSchema = new Schema.Parser().parse(avscMessageInput);
}
// Get the Avro key schema file to parse the Avro messages
if (!avroKeySchemaFile.isEmpty()) {
LOGGER.log(TraceLevel.TRACE, "Retrieving and parsing Avro key schema file " + avroKeySchemaFile);
InputStream avscKeyInput = new FileInputStream(avroKeySchemaFile);
keySchema = new Schema.Parser().parse(avscKeyInput);
}
// If the schema is embedded in the message, the schema file must not be
// specified
if (avroSchemaEmbedded && !avroMessageSchemaFile.isEmpty())
throw new Exception(
"Parameter avroMessageSchema cannot be specified if the schema is embedded in the message.");
LOGGER.log(TraceLevel.TRACE, "AvroToJSON operator initialized, ready to receive tuples");
}
Example 7: initialize
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
/**
* Initialize this operator. Called once before any tuples are processed.
*
* @param operatorContext
* OperatorContext for this operator.
* @throws Exception
* Operator failure, will cause the enclosing PE to terminate.
*/
@Override
public synchronized void initialize(OperatorContext operatorContext) throws Exception {
// Must call super.initialize(context) to correctly setup an operator.
super.initialize(operatorContext);
LOGGER.log(TraceLevel.TRACE, "Operator " + operatorContext.getName() + " initializing in PE: "
+ operatorContext.getPE().getPEId() + " in Job: " + operatorContext.getPE().getJobId());
StreamSchema ssOp0 = getOutput(0).getStreamSchema();
StreamSchema ssIp0 = getInput(0).getStreamSchema();
// If no output Avro message attribute specified, use default
if (outputAvroMessage == null) {
if (ssOp0.getAttributeCount() == 1) {
outputAvroMessage = ssOp0.getAttribute(0).getName();
} else {
outputAvroMessage = DEFAULT_OUTPUT_AVRO_MSG_ATTRIBUTE;
}
}
LOGGER.log(TraceLevel.TRACE, "Output Avro message attribute: " + outputAvroMessage);
// Get the Avro schema file to parse the Avro messages
LOGGER.log(TraceLevel.TRACE, "Retrieving and parsing Avro message schema file " + avroMessageSchemaFile);
InputStream avscInput = new FileInputStream(avroMessageSchemaFile);
Schema.Parser parser = new Schema.Parser();
messageSchema = parser.parse(avscInput);
// Check Streams and Avro schema
boolean validMapping = TupleToAvroConverter.isValidTupleToAvroMapping(operatorContext.getName(), ssIp0,
messageSchema);
if (!validMapping) {
throw new Exception("Streams input tuple schema cannot be mapped to Avro output schema.");
}
LOGGER.log(TraceLevel.TRACE, "Embed Avro schema in generated output Avro message block: " + embedAvroSchema);
LOGGER.log(TraceLevel.TRACE, "Submit Avro message block when punctuation is received: " + submitOnPunct);
LOGGER.log(TraceLevel.TRACE, "Ignore parsing error: " + ignoreParsingError);
// submitOnPunct is only valid if Avro schema is embedded in the output
if (submitOnPunct && !embedAvroSchema)
throw new Exception(
"Parameter submitOnPunct can only be set to true if Avro schema is embedded in the output.");
// If the Avro schema is embedded in the output, at least one submission threshold is mandatory
if (embedAvroSchema && !submitOnPunct && tuplesPerMessage == 0 && bytesPerMessage == 0 && timePerMessage == 0)
throw new Exception("If Avro schema is embedded in the output, you must specify one of the thresholds when "
+ "the tuple must be submitted (submitOnPunct, bytesPerMessage, timePerMessage, tuplesPerMessage).");
// Prepare and initialize variables that don't change for every input
// record
avroWriter = new GenericDatumWriter<GenericRecord>(messageSchema);
avroDataFileWriter = new DataFileWriter<GenericRecord>(avroWriter);
if (embedAvroSchema)
avroDataFileWriter.create(messageSchema, avroBlockByteArray);
numberOfBatchedMessages = 0;
LOGGER.log(TraceLevel.TRACE, "TupleToAvro operator initialized, ready to receive tuples");
}
Example 8: checkInputPortSchema
import com.ibm.streams.operator.StreamSchema; // import the package/class the method depends on
/**
* This function checks only things that can be determined at compile time.
*
* @param checker
* @throws Exception
*/
@ContextCheck(compile = true)
public static void checkInputPortSchema(OperatorContextChecker checker)
throws Exception {
// an rstring, ustring or blob attribute must be provided.
StreamSchema inputSchema = checker.getOperatorContext()
.getStreamingInputs().get(0).getStreamSchema();
boolean hasDynamic = checker.getOperatorContext().getParameterNames()
.contains(IHdfsConstants.PARAM_FILE_NAME_ATTR);
if (!hasDynamic && inputSchema.getAttributeCount() != 1) {
checker.setInvalidContext(
Messages.getString("HDFS_SINK_ONE_ATTR_INPUT_PORT", IHdfsConstants.PARAM_FILE_NAME_ATTR), new Object[] {} );
}
if (hasDynamic && inputSchema.getAttributeCount() != 2) {
checker.setInvalidContext(
Messages.getString("HDFS_SINK_TWO_ATTR_INPUT_PORT", IHdfsConstants.PARAM_FILE_NAME_ATTR, IHdfsConstants.PARAM_FILE_NAME_ATTR ) , new Object[] {});
}
if (inputSchema.getAttributeCount() == 1) {
// check that the attribute type is rstring, ustring or blob
if (MetaType.RSTRING != inputSchema.getAttribute(0).getType()
.getMetaType()
&& MetaType.USTRING != inputSchema.getAttribute(0)
.getType().getMetaType()
&& MetaType.BLOB != inputSchema.getAttribute(0).getType()
.getMetaType()) {
checker.setInvalidContext(
Messages.getString("HDFS_SINK_INVALID_ATTR_TYPE", inputSchema.getAttribute(0).getType().getMetaType()), null);
}
}
if (inputSchema.getAttributeCount() == 2) {
int numString = 0;
int numBlob = 0;
for (int i = 0; i < 2; i++) {
    MetaType t = inputSchema.getAttribute(i).getType().getMetaType();
    if (MetaType.USTRING == t || MetaType.RSTRING == t) {
        numString++;
    } else if (MetaType.BLOB == t) {
        // count blob attributes separately from string attributes
        numBlob++;
    }
} // end for loop
// valid combinations: two strings (filename + string data) or one
// string plus one blob (filename + blob data); anything else is invalid
if (!((numBlob == 0 && numString == 2) || (numBlob == 1 && numString == 1))) {
    checker.setInvalidContext(
            Messages.getString("HDFS_SINK_INVALID_ATTR_FILENAME_DATA"),
            null);
}
}
}
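As a footnote to Example 8: the tally loop hard-codes the bound 2 because the schema width was verified just above. A hedged sketch of the same tally written against getAttributeCount(), so it works for any schema width, could look like this:
// Sketch only: count string and blob attributes across the whole schema,
// using getAttributeCount() as the loop bound instead of a fixed width.
int numString = 0;
int numBlob = 0;
for (int i = 0; i < inputSchema.getAttributeCount(); i++) {
    MetaType t = inputSchema.getAttribute(i).getType().getMetaType();
    if (t == MetaType.RSTRING || t == MetaType.USTRING) {
        numString++;
    } else if (t == MetaType.BLOB) {
        numBlob++;
    }
}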