当前位置: 首页>>代码示例>>Python>>正文


Python Dataset.load方法代码示例

本文整理汇总了Python中tablib.Dataset.load方法的典型用法代码示例。如果您正苦于以下问题:Python Dataset.load方法的具体用法?Python Dataset.load怎么用?Python Dataset.load使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tablib.Dataset的用法示例。


在下文中一共展示了Dataset.load方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: ExcelUploader

# 需要导入模块: from tablib import Dataset [as 别名]
# 或者: from tablib.Dataset import load [as 别名]
class ExcelUploader(BaseManager):
    """Manage general uploading."""

    response = {'success': False, 'desc': ''}

    def __init__(self, request, **kwargs):
        """Store the request and the uploader configuration.

        The request's user is wrapped with ``extendUser`` so the
        role-check helpers (e.g. ``is_epidemiologist_editor``) become
        available. All remaining configuration is taken verbatim from
        *kwargs*; a missing key raises ``KeyError``.
        """
        self.request = request
        self.request.user = extendUser(request.user)
        # Copy each required configuration option onto the instance,
        # in the same order the options are documented.
        for option in ('model', 'compulsatory_fields', 'optional_fields',
                       'template_location', 'template_zipped_name',
                       'form_input_name'):
            setattr(self, option, kwargs[option])

    def get_template(self):
        """Return the template used to import storm drain data.

        Only epidemiologist editors may download the template; any other
        user receives the standard unauthorized response.

        Returns:
            HttpResponse: an ``application/zip`` attachment bundling every
            file found under ``self.template_location``.
        """
        # Guard clause: bail out early for unauthorized users.
        if not self.request.user.is_epidemiologist_editor():
            return self._end_unauthorized()

        in_memory = StringIO()
        # Context manager finalizes/closes the archive even if a
        # write() raises; `archive` also avoids shadowing builtin `zip`.
        with ZipFile(in_memory, "a") as archive:
            for dirname, subdirs, files in os.walk(self.template_location):
                for filename in files:
                    # Flatten: store each file under its bare name.
                    archive.write(os.path.join(dirname, filename), filename)

        # Prepare the response object.
        response = HttpResponse(content_type="application/zip")
        response["Content-Disposition"] = ("attachment;filename=" +
                                           self.template_zipped_name)
        # Rewind the in-memory buffer and stream it into the response.
        in_memory.seek(0)
        response.write(in_memory.read())
        return response

    def _match_column_to_field(self, fields, file_headers, key):
        """Match each column name with the model fields.

        Every header in *file_headers* equal to one of the accepted
        variants in *fields* is replaced, in
        ``self.read_dataset.headers``, by the canonical field name *key*.

        Bug fix: the original only advanced its position counter on
        NON-matching headers, so after the first match every later
        header was renamed one index too early; ``enumerate`` keeps the
        index and the header value in lockstep.
        """
        for field in fields:
            for position, header in enumerate(file_headers):
                if header == field:
                    self.read_dataset.headers[position] = key

    def _get_missing_fields(self, file_headers):
        """Return all missing headers.

        Each compulsory field accepts several column-name variants; the
        field is reported missing when none of its variants appears in
        *file_headers*. Fields that are present get their matching
        columns renamed to the canonical model field name.

        ``dict.items()`` replaces the Python-2-only ``iteritems()`` so
        the code also runs under Python 3.
        """
        missing_headers = []
        for key, field_variants in self.compulsatory_fields.items():
            # Set intersection: does any accepted variant appear?
            if not set(field_variants) & set(file_headers):
                missing_headers.append(key)
            else:
                # find out the match between column name and model field
                self._match_column_to_field(
                    field_variants,
                    file_headers,
                    key
                )
        return missing_headers

    def _get_optional_fields(self, file_headers):
        """Rename columns that match any optional model field.

        Unlike compulsory fields, absent optional fields are simply
        ignored — nothing is reported back to the caller.

        ``dict.items()`` replaces the Python-2-only ``iteritems()``.
        """
        for key, field_variants in self.optional_fields.items():
            # find out the match between column name and model field
            self._match_column_to_field(
                field_variants,
                file_headers,
                key
            )

    def _prepare_dataset_to_import(self):
        # Add column 'user_id'
        self.read_dataset.insert_col(
            0,
            col=([self.request.user.id, ]
                 * self.read_dataset.height),
            header="user_id"
        )

        booleans = []
        numerics = []

        for field in Epidemiology._meta.fields:
            if field.get_internal_type() == 'NullBooleanField':
                booleans.append(field.name)
            elif field.get_internal_type() in [
                        'FloatField',
                        'DecimalField',
                        'IntegerField'
                    ]:
                numerics.append(field.name)

#.........这里部分代码省略.........
开发者ID:MoveLab,项目名称:tigatrapp-server,代码行数:103,代码来源:upload.py

示例2: StormDrainUploader

# 需要导入模块: from tablib import Dataset [as 别名]
# 或者: from tablib.Dataset import load [as 别名]
class StormDrainUploader(StormDrainVersioningMixin, BaseManager):
    """Manage the storm drains data uploading."""

    response = {'success': False, 'desc': ''}

    def get_template(self):
        """Return the template used to import storm drain data.

        Only authorized users may download the template; any other user
        receives the standard unauthorized response.

        Returns:
            HttpResponse: an ``application/zip`` attachment named
            ``mosquito_alert_template.zip`` bundling every file found
            under ``stormdrain_templates_path``.
        """
        # Guard clause: bail out early for unauthorized users.
        if not self.request.user.is_authorized():
            return self._end_unauthorized()

        in_memory = StringIO()
        # Context manager finalizes/closes the archive even if a
        # write() raises; `archive` also avoids shadowing builtin `zip`.
        with ZipFile(in_memory, "a") as archive:
            for dirname, subdirs, files in os.walk(stormdrain_templates_path):
                for filename in files:
                    # Flatten: store each file under its bare name.
                    archive.write(os.path.join(dirname, filename), filename)

        # Prepare the response object.
        response = HttpResponse(content_type="application/zip")
        response["Content-Disposition"] = ("attachment;filename="
                                           "mosquito_alert_template.zip")
        # Rewind the in-memory buffer and stream it into the response.
        in_memory.seek(0)
        response.write(in_memory.read())
        return response

    def _match_column_to_field(self, fields, file_headers, key):
        """Match each column name with the model fields.

        Every header in *file_headers* equal to one of the accepted
        variants in *fields* is replaced, in
        ``self.read_dataset.headers``, by the canonical field name *key*.

        Bug fix: the original only advanced its position counter on
        NON-matching headers, so after the first match every later
        header was renamed one index too early; ``enumerate`` keeps the
        index and the header value in lockstep.
        """
        for field in fields:
            for position, header in enumerate(file_headers):
                if header == field:
                    self.read_dataset.headers[position] = key

    def _get_missing_fields(self, file_headers):
        """Return all missing headers.

        Each compulsory storm-drain field accepts several column-name
        variants; the field is reported missing when none of its
        variants appears in *file_headers*. Fields that are present get
        their matching columns renamed to the canonical model field name.

        ``dict.items()`` replaces the Python-2-only ``iteritems()`` so
        the code also runs under Python 3.
        """
        missing_headers = []
        for key, field_variants in compulsatory_stormdrain_fields.items():
            # Set intersection: does any accepted variant appear?
            if not set(field_variants) & set(file_headers):
                missing_headers.append(key)
            else:
                # find out the match between column name and model field
                self._match_column_to_field(
                    field_variants,
                    file_headers,
                    key
                )
        return missing_headers

    def _get_optional_fields(self, file_headers):
        """Rename columns that match any optional storm-drain field.

        Unlike compulsory fields, absent optional fields are simply
        ignored — nothing is reported back to the caller.

        ``dict.items()`` replaces the Python-2-only ``iteritems()``.
        """
        for key, field_variants in optional_stormdrain_fields.items():
            # find out the match between column name and model field
            self._match_column_to_field(
                field_variants,
                file_headers,
                key
            )

    def _prepare_dataset_to_import(self):
        # Add column 'user_id'
        self.read_dataset.insert_col(
            0,
            col=([self.request.user.id, ]
                 * self.read_dataset.height),
            header="user_id"
        )
        # Add column 'version'
        self.last_ver = self._get_last_version_id(self.request)
        self.read_dataset.insert_col(
            0,
            col=[self.last_ver+1, ]*self.read_dataset.height,
            header="version"
        )
        # Define index position of headers
        headers = {k: v for v, k in
                   enumerate(self.read_dataset.headers)}
        # Add original lat, lon columns
        self.read_dataset.append_col(
            self.read_dataset.get_col(headers['lon']),
            header="original_lon"
        )
        self.read_dataset.append_col(
            self.read_dataset.get_col(headers['lat']),
            header="original_lat"
        )

        booleans = []
        numerics = []

        for field in StormDrain._meta.fields:
            if field.get_internal_type() == 'NullBooleanField':
                booleans.append(field.name)
            elif field.get_internal_type() in [
#.........这里部分代码省略.........
开发者ID:MoveLab,项目名称:tigatrapp-server,代码行数:103,代码来源:stormdrains.py


注:本文中的tablib.Dataset.load方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。