This article collects typical usage examples of the dbfread.DBF attribute in Python. If you are wondering what exactly Python dbfread.DBF does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples from the dbfread module that this attribute belongs to.
The following presents 11 code examples of dbfread.DBF, sorted by popularity by default.
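Before the examples, here is a minimal sketch of the basic dbfread.DBF workflow. The file name 'people.dbf', the cp1252 encoding, and the 'NAME' field are placeholders chosen for illustration, not values taken from the examples below:

from dbfread import DBF

# Open a DBF table; records are streamed from disk unless load=True.
table = DBF('people.dbf', encoding='cp1252', load=False)

print(table.field_names)   # column names declared in the DBF header
for record in table:       # each record behaves like a dict keyed by field name
    print(record['NAME'])  # 'NAME' is a hypothetical field for illustration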
Example 1: extract_elements
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def extract_elements(zf, fn, nodename):
    if fn.lower().endswith('.xml'):
        with zf.open(fn) as f:
            et = lxml.etree.iterparse(f)
            for _, node in et:
                if not node.tag.endswith(f'}}{nodename}'):
                    continue
                yield dict((j.tag[j.tag.rindex('}')+1:], j.text) for j in node.iterchildren())
                node.clear()
    elif fn.lower().endswith('dbf'):
        with zf.open(fn) as f, NamedTemporaryFile() as temp:
            # dbfread cannot read from a file handle, https://github.com/olemb/dbfread/issues/25
            shutil.copyfileobj(f, temp)
            temp.flush()
            d = DBF(temp.name, encoding='cp852')
            yield from d
    else:
        raise NotImplementedError(fn)
Example 2: get_dbf_path
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def get_dbf_path(table, year, data_dir):
    """Given a year and table name, returns the path to its datastore DBF file.

    Args:
        table (string): The name of one of the FERC Form 1 data tables. For
            example 'f1_fuel' or 'f1_steam'.
        year (int): The year whose data you wish to find.
        data_dir (str): A string representing the full path to the top level of
            the PUDL datastore containing the FERC Form 1 data to be used.

    Returns:
        str: dbf_path, a (hopefully) OS independent path including the
        filename of the DBF file corresponding to the requested year and
        table name.
    """
    dbf_name = pc.ferc1_tbl2dbf[table]
    ferc1_dir = datastore.path(
        'ferc1', year=year, file=False, data_dir=data_dir)
    dbf_path = os.path.join(ferc1_dir, f"{dbf_name}.DBF")
    return dbf_path
Example 3: parseN
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def parseN(self, field, data):  # noqa: N802
    """Augments the Numeric DBF parser to account for bad FERC data.

    There are a small number of bad entries in the backlog of FERC Form 1
    data. They take the form of leading/trailing zeroes or null characters
    in supposedly numeric fields, and occasionally a naked '.'.

    Accordingly, this custom parser strips leading and trailing zeros and
    null characters, and replaces a bare '.' character with zero, allowing
    all these fields to be cast to numeric values.

    Args:
        self ():
        field ():
        data ():
    """
    # Strip whitespace, null characters, and zeroes
    data = data.strip().strip(b'*\x00').lstrip(b'0')
    # Replace bare periods (which are non-numeric) with zero.
    if data == b'.':
        data = b'0'
    return super(FERC1FieldParser, self).parseN(field, data)
Example 4: add_assessor_data
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def add_assessor_data(assessor_data_file, logger):
    if logger:
        logger.info("Adding assessor data...")
    # This could be done more efficiently, but it only needs to run once.
    dbf = dbfread.DBF(assessor_data_file)
    copy_attributes = [
        "LOT_SIZE", "USE_CODE", "YEAR_BUILT", "BLD_AREA", "UNITS", "STYLE",
        "STORIES", "NUM_ROOMS", "ZONING"
    ]
    processed_ids = set()
    for entry in dbf:
        loc_id = entry["LOC_ID"]
        if loc_id in processed_ids:
            continue
        processed_ids.add(loc_id)
        try:
            parcel = Parcel.objects.get(loc_id=loc_id)
            parcel.address_num = entry["ADDR_NUM"]
            parcel.full_street = entry["FULL_STR"]
            parcel.save()
            for attr in copy_attributes:
                if attr in entry:
                    try:
                        parcel.attributes.create(
                            name=attr, value=str(entry[attr]))
                    except IntegrityError:
                        continue
        except Parcel.DoesNotExist:
            continue
Example 5: get_raw_df
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def get_raw_df(table, dbc_map, data_dir, years=pc.data_years['ferc1']):
    """Combine several years of a given FERC Form 1 DBF table into a dataframe.

    Args:
        table (string): The name of the FERC Form 1 table from which data is
            read.
        dbc_map (dict of dicts): A dictionary of dictionaries, of the kind
            returned by get_dbc_map(), describing the table and column names
            stored within the FERC Form 1 FoxPro database files.
        data_dir (str): A string representing the full path to the top level of
            the PUDL datastore containing the FERC Form 1 data to be used.
        years (list): Range of years to be combined into a single DataFrame.

    Returns:
        :class:`pandas.DataFrame`: A DataFrame containing several years of FERC
        Form 1 data for the given table.
    """
    dbf_name = pc.ferc1_tbl2dbf[table]
    raw_dfs = []
    for yr in years:
        ferc1_dir = datastore.path(
            'ferc1', year=yr, file=False, data_dir=data_dir)
        dbf_path = os.path.join(ferc1_dir, f"{dbf_name}.DBF")
        if os.path.exists(dbf_path):
            new_df = pd.DataFrame(
                iter(dbfread.DBF(dbf_path,
                                 encoding='latin1',
                                 parserclass=FERC1FieldParser)))
            raw_dfs = raw_dfs + [new_df, ]
    if raw_dfs:
        return (
            pd.concat(raw_dfs, sort=True).
            drop('_NullFlags', axis=1, errors='ignore').
            rename(dbc_map[table], axis=1)
        )
Example 6: main
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def main():
    for filename in sys.argv[1:]:
        print(filename + ':')
        table = DBF(filename, ignore_missing_memofile=True)

        show('Name:', table.name)
        show('Memo File:', table.memofilename or '')
        show('DB Version:', table.dbversion)
        show('Records:', len(table))
        show('Deleted Records:', len(table.deleted))
        show('Last Updated:', table.date)
        show('Character Encoding:', table.encoding)
        show('Fields:')
        for field in table.fields:
            show_field(field)
Example 7: read_dbf
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def read_dbf(name, encoding="cp1252"):
    return dbfread.DBF(name, encoding=encoding, parserclass=ExtraFieldParser)
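ExtraFieldParser here comes from the surrounding project and is not shown. As a rough sketch of what such a custom parser might look like (the class name TolerantFieldParser and the fallback behaviour are assumptions, modeled on the FieldParser extension point that dbfread documents), one could write:

from dbfread import DBF, FieldParser, InvalidValue

class TolerantFieldParser(FieldParser):
    """Return an InvalidValue marker instead of raising on unparseable data."""
    def parse(self, field, data):
        try:
            return FieldParser.parse(self, field, data)
        except ValueError:
            # Keep the raw bytes so the rest of the record still loads.
            return InvalidValue(data)

# Hypothetical usage mirroring read_dbf() above:
# table = DBF('some_table.dbf', encoding='cp1252', parserclass=TolerantFieldParser)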
Example 8: InterpretaionACCuracyEstimaton
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def InterpretaionACCuracyEstimaton(self, InterAccuEstiFp):
    NDVITable = DBF(InterAccuEstiFp, load=True)  # read the .dbf file holding the sampled data
    # print(NDVITable)
    fieldName = [name for name in NDVITable.field_names]
    print(fieldName)
    records = [[record[field] for field in record] for record in NDVITable]
    val = np.array(records).T
    val_true = val[1]
    # print(val_true)
    val_interpretation = val[2]
    # print(val_interpretation)
    b = 0.3
    c = 2.5
    d = 500
    # Convert the sampled values to the same class codes as the interpreted data,
    # so that a confusion matrix can be computed for the accuracy assessment.
    val_interpre = np.copy(val_interpretation)
    val_interpre[val_interpre < b] = 1000
    val_interpre[(val_interpre >= b) & (val_interpre < c)] = 2000
    val_interpre[(val_interpre >= c) & (val_interpre < d)] = 3000
    val_interpre[val_interpre == 1000] = 1
    val_interpre[val_interpre == 2000] = 2
    val_interpre[val_interpre == 3000] = 3
    # print(val_interpre)
    val_true = np.array(val_true, dtype=int)
    val_interpre = np.array(val_interpre, dtype=int)
    print(val_true.shape, val_interpre.shape)
    print(val_true, "\n", val_interpre)
    confMat = confusion_matrix(y_true=val_true, y_pred=val_interpre)
    print(confMat)
    print(np.sum(confMat.diagonal()) / 150)
## Histogram and fitted curve
Example 9: get_args
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def get_args():
    """Get CLI arguments and options."""
    parser = argparse.ArgumentParser(
        prog='dbf2csv',
        description='small utility to convert simple *.DBF files to *.CSV'
    )
    parser.add_argument('input')
    parser.add_argument('output', nargs='?', default=None)
    parser.add_argument('-ie', '--input-encoding',
                        default='cp850',
                        help='charset of *.dbf files (default: cp850)')
    parser.add_argument('-oe', '--output-encoding',
                        default='utf8',
                        help='charset of *.csv files (default: utf8)')
    parser.add_argument('-q', '--quoting-mode',
                        choices=('minimal', 'all', 'non-numeric', 'none'),
                        default='minimal',
                        help='quoting mode for csv files (default: minimal)')
    parser.add_argument('-d', '--delimiter-char',
                        default=',',
                        help='delimiter char for csv files (default: ",")')
    parser.add_argument('-e', '--escape-char',
                        default='\\',
                        help='escape char for csv files (default: "\\")')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {version}'.format(version=__version__))
    return parser.parse_args()
Example 10: __convert
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def __convert(input_file_path, output_file, args):
    def encode_decode(x):
        """
        DBF returns a unicode string encoded as args.input_encoding.
        We convert that back into bytes and then decode as args.output_encoding.
        """
        if not isinstance(x, str):
            # DBF converts columns into non-str types like int, float
            x = str(x) if x is not None else ''
        return x.encode(args.input_encoding).decode(args.output_encoding)

    try:
        input_reader = DBF(input_file_path,
                           encoding=args.input_encoding,
                           ignore_missing_memofile=True)

        output_writer = csv.DictWriter(output_file,
                                       quoting=args.quoting,
                                       escapechar=args.escape_char,
                                       delimiter=args.delimiter_char,
                                       fieldnames=[encode_decode(x) for x in input_reader.field_names])

        output_writer.writeheader()
        for record in input_reader:
            row = {encode_decode(k): encode_decode(v) for k, v in record.items()}
            output_writer.writerow(row)

    except (UnicodeDecodeError, LookupError):
        log.error('Error: Unknown encoding\n')
        exit(0)
    except UnicodeEncodeError:
        log.error('Error: Can\'t encode to output encoding: {}\n'.format(
            args.output_encoding))
        exit(0)
    except struct.error:
        log.error('Error: Bad input file format: {}\n'.format(
            os.path.basename(input_file_path))
        )
        exit(0)
Example 11: __init__
# Required module: import dbfread [as alias]
# Or: from dbfread import DBF [as alias]
def __init__(self, filepath, **kwargs):
    """DBF2 constructor

    Args:
        filepath (str): The dbf file path or file like object.
        kwargs: Keyword arguments that are passed to the DBF reader.

    Kwargs:
        load (bool): Load all records into memory (default: False).
        encoding (str): Character encoding (default: None, parsed from
            the `language_driver`).
        sanitize (bool): Convert field names to lower case
            (default: False).
        ignorecase (bool): Treat file name as case insensitive
            (default: True).
        ignore_missing_memofile (bool): Suppress `MissingMemoFile`
            exceptions (default: False).
    """
    try:
        kwargs['recfactory'] = dict
        return super(DBF2, self).__init__(filepath, **kwargs)
    except (AttributeError, TypeError):
        filename = filepath.name

    defaults = {
        'ignorecase': True, 'parserclass': FieldParser, 'recfactory': dict}

    [kwargs.setdefault(k1, v1) for k1, v1 in defaults.items()]
    [self.__setattr__(k2, v2) for k2, v2 in kwargs.items()]
    self.name = p.splitext(p.basename(filename))[0].lower()
    self.filename = ifind(filename) if self.ignorecase else filename

    if not self.filename:
        raise DBFNotFound('could not find file {!r}'.format(filename))

    self.fields = []
    self.field_names = []
    self._read_headers(filepath, self.ignore_missing_memofile)
    self._check_headers()

    try:
        year = expand_year(self.header.year)
    except ValueError:
        self.date = None
    else:
        self.date = date(year, self.header.month, self.header.day)

    self.memofilename = self._get_memofilename()

    if self.load:
        self.load()