当前位置: 首页>>代码示例>>Java>>正文


Java ResourceFieldSchema.getSchema方法代码示例

本文整理汇总了Java中org.apache.pig.ResourceSchema.ResourceFieldSchema.getSchema方法的典型用法代码示例。如果您正苦于以下问题:Java ResourceFieldSchema.getSchema方法的具体用法?Java ResourceFieldSchema.getSchema怎么用?Java ResourceFieldSchema.getSchema使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.pig.ResourceSchema.ResourceFieldSchema的用法示例。


在下文中一共展示了ResourceFieldSchema.getSchema方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: convert

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Converts an Avro {@code Schema} into the equivalent Pig {@code ResourceSchema}.
 * Schemas containing generic (non-nullable) unions are rejected, since Pig has
 * no corresponding type.
 *
 * @throws IOException if the schema contains a generic union
 */
public static ResourceSchema convert(Schema schema) throws IOException {
    if (AvroStorageUtils.containsGenericUnion(schema)) {
        throw new IOException ("We don't accept schema containing generic unions.");
    }

    // visited set guards against infinite recursion on self-referencing records
    ResourceFieldSchema converted = inconvert(schema, FIELD, new HashSet<Schema>());

    // A tuple field already carries a complete ResourceSchema; any other type
    // must first be wrapped inside a single-field tuple to form the top level.
    if (converted.getType() == DataType.TUPLE) {
        return converted.getSchema();
    }

    ResourceFieldSchema wrapper = AvroStorageUtils.wrapAsTuple(converted);
    ResourceSchema topLevel = new ResourceSchema();
    topLevel.setFields(new ResourceFieldSchema[] { wrapper });
    return topLevel;
}
 
开发者ID:sigmoidanalytics,项目名称:spork-streaming,代码行数:26,代码来源:AvroSchema2Pig.java

示例2: convert

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Translates an Avro {@code Schema} into a Pig {@code ResourceSchema}.
 * Generic unions cannot be represented in Pig and cause an exception.
 *
 * @throws IOException if the schema contains a generic union
 */
public static ResourceSchema convert(Schema schema) throws IOException
{
    if (AvroStorageUtils.containsGenericUnion(schema))
    {
        throw new IOException("We don't accept schema containing generic unions.");
    }

    // track visited record schemas so recursive definitions terminate
    Set<Schema> visitedRecords = new HashSet<Schema>();
    ResourceFieldSchema fieldSchema = inconvert(schema, FIELD, visitedRecords);

    // Tuples already hold a full schema; everything else gets promoted into
    // a one-field tuple so the result is always a top-level tuple schema.
    if (fieldSchema.getType() != DataType.TUPLE)
    {
        ResourceFieldSchema wrapped = AvroStorageUtils.wrapAsTuple(fieldSchema);
        ResourceSchema top = new ResourceSchema();
        top.setFields(new ResourceFieldSchema[] { wrapped });
        return top;
    }
    return fieldSchema.getSchema();
}
 
开发者ID:linkedin,项目名称:Cubert,代码行数:30,代码来源:AvroSchema2Pig.java

示例3: getPigSchema

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Recursively converts a {@code ResourceSchema} into a Pig {@code Schema}.
 * Bag fields with a non-null schema are validated: the bag must contain
 * exactly one field and that field must be a tuple; otherwise an
 * invalid-schema exception is raised. A null input yields a null schema
 * (partial schemas are allowed).
 */
public static Schema getPigSchema(ResourceSchema rSchema) 
throws FrontendException {
    if (rSchema == null) {
        return null;
    }
    List<FieldSchema> converted = new ArrayList<FieldSchema>();
    for (ResourceFieldSchema rfs : rSchema.getFields()) {
        Schema innerSchema = (rfs.getSchema() == null)
                ? null
                : getPigSchema(rfs.getSchema());
        FieldSchema fs = new FieldSchema(rfs.getName(), innerSchema, rfs.getType());

        // a bag with a known schema must wrap exactly one tuple field
        if (rfs.getType() == DataType.BAG && fs.schema != null) {
            if (fs.schema.size() != 1
                    || fs.schema.getField(0).type != DataType.TUPLE) {
                ResourceFieldSchema.throwInvalidSchemaException();
            }
        }
        converted.add(fs);
    }
    return new Schema(converted);
}
 
开发者ID:sigmoidanalytics,项目名称:spork-streaming,代码行数:28,代码来源:Schema.java

示例4: getSchema

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Returns the resource schema for the ORC data at {@code location}.
 * Lazily resolves the type info on first call; returns {@code null} when
 * it still cannot be determined (the multiple load/store case).
 */
@Override
public ResourceSchema getSchema(String location, Job job)
        throws IOException {
    if (typeInfo == null) {
        typeInfo = getTypeInfo(location, job);
    }
    // still unresolved: multiple load/store scenario, no schema to report
    if (typeInfo == null) {
        return null;
    }
    return OrcUtils.getResourceFieldSchema(typeInfo).getSchema();
}
 
开发者ID:sigmoidanalytics,项目名称:spork,代码行数:15,代码来源:OrcStorage.java

示例5: convert

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Serializes a single-field Pig tuple (chararray or bytearray) into the
 * target byte array, treating the field content as pre-rendered JSON.
 * An empty tuple produces an empty JSON document.
 */
@Override
public void convert(Object from, BytesArray to) {
    // input must be a PigTuple wrapping a tuple with a single text/binary field
    Assert.isTrue(from instanceof PigTuple,
            String.format("Unexpected object type, expecting [%s], given [%s]", PigTuple.class, from.getClass()));

    PigTuple pigTuple = (PigTuple) from;

    // unwrap the tuple's inner schema
    ResourceSchema innerSchema = pigTuple.getSchema().getSchema();

    // an empty tuple serializes as an empty document
    if (innerSchema == null) {
        to.bytes("{}");
        return;
    }

    Assert.isTrue(innerSchema.getFields().length == 1, "When using JSON input, only one field is expected");

    Object value;
    byte fieldType;
    try {
        value = pigTuple.getTuple().get(0);
        fieldType = pigTuple.getTuple().getType(0);
    } catch (Exception ex) {
        throw new EsHadoopIllegalStateException("Encountered exception while processing tuple", ex);
    }

    switch (fieldType) {
    case DataType.BIGCHARARRAY:
    case DataType.CHARARRAY:
        to.bytes(value.toString());
        return;
    case DataType.BYTEARRAY:
        DataByteArray dba = (DataByteArray) value;
        to.bytes(dba.get(), dba.size());
        return;
    default:
        throw new EsHadoopIllegalArgumentException(String.format("Cannot handle Pig type [%s]; expecting [%s,%s]", value.getClass(), String.class, DataByteArray.class));
    }
}
 
开发者ID:xushjie1987,项目名称:es-hadoop-v2.2.0,代码行数:47,代码来源:PigBytesConverter.java

示例6: writeTuple

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Writes a Pig tuple to the JSON generator.
 *
 * Non-root tuples are emitted inside a JSON array; when field names are
 * requested (or at the root) the tuple is rendered as a JSON object whose
 * keys are the field names, falling back to the positional index when the
 * schema carries no names. A tuple counts as empty when it has no schema,
 * or when every field is an empty complex type; empty tuples take a
 * shortcut that emits the minimal valid representation.
 */
private Result writeTuple(Object object, ResourceFieldSchema field, Generator generator, boolean writeTupleFieldNames, boolean isRoot) {
    ResourceSchema layout = field.getSchema();

    Result outcome = Result.SUCCESFUL();
    boolean asObject = isRoot || writeTupleFieldNames;

    boolean empty;
    if (layout == null) {
        empty = true;
    }
    else {
        // also treat the tuple as empty when all its fields are empty complex types
        empty = true;
        for (ResourceFieldSchema sub : layout.getFields()) {
            if (sub.getSchema() != null || !PigUtils.isComplexType(sub)) {
                empty = false;
            }
        }
    }

    if (empty) {
        if (!isRoot) {
            generator.writeBeginArray();
        }
        if (asObject) {
            generator.writeBeginObject();
            generator.writeEndObject();
        }
        if (!isRoot) {
            generator.writeEndArray();
        }
        return outcome;
    }

    ResourceFieldSchema[] subFields = layout.getFields();

    // getAll avoids the checked exception that get(int) would force us to handle
    List<Object> values = ((Tuple) object).getAll();

    if (!isRoot) {
        generator.writeBeginArray();
    }
    if (asObject) {
        generator.writeBeginObject();
    }

    for (int i = 0; i < subFields.length; i++) {
        if (asObject) {
            String label = subFields[i].getName();
            // schemas may lack names - fall back to the positional index
            generator.writeFieldName(StringUtils.hasText(label) ? alias.toES(label) : Integer.toString(i));
        }
        Result fieldOutcome = write(values.get(i), subFields[i], generator);
        if (!fieldOutcome.isSuccesful()) {
            return fieldOutcome;
        }
    }

    if (asObject) {
        generator.writeEndObject();
    }
    if (!isRoot) {
        generator.writeEndArray();
    }

    return outcome;
}
 
开发者ID:xushjie1987,项目名称:es-hadoop-v2.2.0,代码行数:67,代码来源:PigValueWriter.java

示例7: extractField

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Walks the configured field-name path through nested {@code PigTuple}s,
 * narrowing {@code target} one level per path segment.
 *
 * Atomic values are extracted as strings; nested tuples are re-wrapped in
 * a {@code PigTuple} carrying the matched field's schema so the walk can
 * continue. Any other Pig type fails the assertion. If at any level the
 * current target is not a {@code PigTuple}, {@code NOT_FOUND} is returned.
 */
@Override
protected Object extractField(Object target) {
    for (String fieldName : getFieldNames()) {
        if (!(target instanceof PigTuple)) {
            return NOT_FOUND;
        }
        PigTuple current = (PigTuple) target;
        ResourceFieldSchema[] fields = current.getSchema().getSchema().getFields();

        for (int i = 0; i < fields.length; i++) {
            ResourceFieldSchema candidate = fields[i];
            if (!fieldName.equals(candidate.getName())) {
                continue;
            }
            byte type = candidate.getType();
            try {
                Object value = current.getTuple().get(i);
                if (DataType.isAtomic(type)) {
                    target = value.toString();
                }
                else if (type == DataType.TUPLE) {
                    // re-wrap so the next path segment sees schema + tuple together
                    PigTuple wrapped = new PigTuple(candidate.getSchema());
                    if (value instanceof PigTuple) {
                        wrapped.setTuple(((PigTuple) value).getTuple());
                    }
                    else {
                        wrapped.setTuple((Tuple) value);
                    }
                    target = wrapped;
                }
                else {
                    Assert.isTrue(false, String.format("Unsupported data type [%s] for field [%s]; use only 'primitives' or 'tuples'", DataType.findTypeName(type), fieldName));
                }
            } catch (ExecException ex) {
                throw new EsHadoopIllegalStateException(String.format("Cannot retrieve field [%s]", fieldName), ex);
            }
            // matched - stop scanning this tuple's fields
            break;
        }
    }
    return target;
}
 
开发者ID:xushjie1987,项目名称:es-hadoop-v2.2.0,代码行数:46,代码来源:PigFieldExtractor.java

示例8: consumeMap

import org.apache.pig.ResourceSchema.ResourceFieldSchema; //导入方法依赖的package包/类
/**
 * Parses one Pig map literal of the form {@code [key#value,key#value,...]}
 * from the stream and returns it as a HashMap.
 *
 * Keys are read verbatim up to the {@code #} separator and must be
 * non-empty. Values are NOT interpreted: nested tuples/bags/maps are kept
 * as raw bytes (DataByteArray) unless the supplied field schema declares a
 * value type, in which case the bytes are converted via bytesToObject.
 * An empty value yields a null map entry.
 *
 * @param in          stream positioned at (or before) the opening '['
 * @param fieldSchema schema of the map field; its first inner field, when
 *                    present, gives the value type (null means untyped map)
 * @throws IOException on premature end of stream, an empty key, or
 *                     mismatched closing delimiters
 */
private Map<String, Object> consumeMap(PushbackInputStream in, ResourceFieldSchema fieldSchema) throws IOException {
    int buf;

    // skip ahead to the opening '[' of the map literal
    while ((buf=in.read())!='[') {
        if (buf==-1) {
            throw new IOException("Unexpect end of map");
        }
    }
    HashMap<String, Object> m = new HashMap<String, Object>();
    ByteArrayOutputStream mOut = new ByteArrayOutputStream(BUFFER_SIZE);
    while (true) {
        // Read key (assume key can not contains special character such as #, (, [, {, }, ], )
        while ((buf=in.read())!='#') {
            if (buf==-1) {
                throw new IOException("Unexpect end of map");
            }
            mOut.write(buf);
        }
        String key = bytesToCharArray(mOut.toByteArray());
        if (key.length()==0)
            throw new IOException("Map key can not be null");

        // Read value
        mOut.reset();
        Deque<Character> level = new LinkedList<Character>(); // keep track of nested tuple/bag/map. We do not interpret, save them as bytearray
        while (true) {
            buf=in.read();
            if (buf==-1) {
                throw new IOException("Unexpect end of map");
            }
            if (buf=='['||buf=='{'||buf=='(') {
                level.push((char)buf);
            }
            else if (buf==']' && level.isEmpty()) // End of map
                break;
            else if (buf==']' ||buf=='}'||buf==')')
            {
            	if (level.isEmpty())
            		throw new IOException("Malformed map");

                // only pop when the closer matches the most recent opener;
                // a mismatched closer is carried along as value bytes
                if (level.peek()==findStartChar((char)buf))
                    level.pop();
            } else if (buf==','&&level.isEmpty()) { // Current map item complete
                break;
            }
            // note: both break paths above skip this write, so the terminating
            // ']' or ',' is never included in the value bytes
            mOut.write(buf);
        }
        Object value = null;
        if (fieldSchema!=null && fieldSchema.getSchema()!=null && mOut.size()>0) {
            // typed map: convert the raw bytes using the declared value type
            value = bytesToObject(mOut.toByteArray(), fieldSchema.getSchema().getFields()[0]);
        } else if (mOut.size()>0) { // untyped map
            value = new DataByteArray(mOut.toByteArray());
        }
        m.put(key, value);
        mOut.reset();
        // the value loop broke on ']' when the whole map is finished,
        // or on ',' when another key#value pair follows
        if (buf==']')
            break;
    }
    return m;
}
 
开发者ID:sigmoidanalytics,项目名称:spork-streaming,代码行数:61,代码来源:Utf8StorageConverter.java


注:本文中的org.apache.pig.ResourceSchema.ResourceFieldSchema.getSchema方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。