This article collects typical usage examples of Python's csv.reader method. If you are wondering what csv.reader does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the csv module to which the method belongs.
A total of 15 code examples of csv.reader are shown below, ordered by popularity by default.
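Before the individual examples, here is a minimal sketch of the basic csv.reader pattern; the file name and its contents are invented purely for illustration:

import csv

# "people.csv" is a made-up file used only for this illustration.
with open("people.csv", newline='') as f:
    reader = csv.reader(f)      # yields one list of strings per row
    header = next(reader)       # treat the first row as the header
    for row in reader:
        print(dict(zip(header, row)))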
Example 1: get_ip_geo_localization
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def get_ip_geo_localization(self, ip):
    self._logger.debug("Getting {0} geo localization ".format(ip))
    if ip is not None and ip.strip() != "":
        result = linecache.getline(self._ip_localization_file,
                                   bisect.bisect(self._ip_localization_ranges, Util.ip_to_int(ip)))
        result = result.strip('\n')  # strip() returns a new string, so it must be reassigned
        reader = csv.reader([result])
        row = next(reader)  # csv.reader objects are plain iterators in Python 3
        geo_loc = ";".join(row[4:6]) + " " + ";".join(row[8:9])
        domain = row[9:10][0]
        result = {"geo_loc": geo_loc, "domain": domain}
        return result
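Note that csv.reader accepts any iterable of strings, not just file objects, which is why the code above can wrap a single pre-fetched line in a list. A standalone sketch of that idiom, with an invented sample line:

import csv

line = "a,b,c\n"                        # a single CSV record, contents invented
row = next(csv.reader([line.strip("\n")]))
print(row)                              # ['a', 'b', 'c']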
Example 2: fetch_agents_from_file
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def fetch_agents_from_file(self, filenm, agent_type):
    """
    Read in a list of bartering agents from a csv file
    """
    max_detect = self.props.get("max_detect",
                                ebm.GLOBAL_KNOWLEDGE)
    with open(filenm) as f:
        reader = csv.reader(f)
        for row in reader:
            agent = agent_type(row[0], max_detect=max_detect)
            self.add_agent(agent)
            for i in range(1, len(row) - 2, STEP):
                good = row[i]
                self.market.add_good(good)
                agent.endow(good,
                            int(row[i + 1]),
                            eval("lambda qty: "
                                 + row[i + 2]))
    logging.info("Goods = " + str(self.market))
Example 3: load_predictions
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def load_predictions(env, nclasses):
    path = os.path.join(env.stats_dir(), "predictions.csv")
    if not os.path.exists(path):
        raise FileNotFoundError(path)  # the file is missing, so FileNotFoundError is the right exception
    with open(path, newline='') as csvfile:
        y_score = []
        y_test = []
        csv_reader = csv.reader(csvfile, dialect="excel")
        for row in csv_reader:
            assert len(row) == nclasses * 2
            y_score.append(list(map(float, row[:nclasses])))
            y_test.append(list(map(float, row[nclasses:])))
        y_score = np.array(y_score)
        y_test = np.array(y_test)
        return y_test, y_score
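Since the loader asserts that every row has nclasses * 2 columns (scores first, then ground-truth values), a complementary writer might look like the following sketch; the layout is inferred from the code above and not confirmed by the original project:

import csv

nclasses = 3
rows = [
    [0.7, 0.2, 0.1, 1, 0, 0],   # nclasses scores, then nclasses one-hot truth values
    [0.1, 0.8, 0.1, 0, 1, 0],
]
with open("predictions.csv", "w", newline='') as csvfile:
    csv.writer(csvfile, dialect="excel").writerows(rows)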
Example 4: __init__
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def __init__(self, filename):
    """Initializes instance of DatasetMetadata."""
    self._true_labels = {}
    self._target_classes = {}
    with open(filename) as f:
        reader = csv.reader(f)
        header_row = next(reader)
        try:
            row_idx_image_id = header_row.index('ImageId')
            row_idx_true_label = header_row.index('TrueLabel')
            row_idx_target_class = header_row.index('TargetClass')
        except ValueError:
            raise IOError('Invalid format of dataset metadata.')
        for row in reader:
            if len(row) < len(header_row):
                # skip partial or empty lines
                continue
            try:
                image_id = row[row_idx_image_id]
                self._true_labels[image_id] = int(row[row_idx_true_label])
                self._target_classes[image_id] = int(row[row_idx_target_class])
            except (IndexError, ValueError):
                raise IOError('Invalid format of dataset metadata')
Example 5: load_images
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory."""
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)
    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    for idx in range(num_images):  # range instead of the Python 2 / six xrange
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')
        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(float) / 255.0  # np.float is removed in recent NumPy
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels
Example 6: accumulate_result
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def accumulate_result(validate_lst, prob):
    sum_result = {}
    cnt_result = {}
    size = prob.shape[0]
    fi = csv.reader(open(validate_lst))
    for i in range(size):
        line = next(fi)  # works on Python 2 and 3, unlike fi.next() / fi.__next__()
        idx = int(line[0])
        if idx not in cnt_result:
            cnt_result[idx] = 0.
            sum_result[idx] = np.zeros((1, prob.shape[1]))
        cnt_result[idx] += 1
        sum_result[idx] += prob[i, :]
    for i in cnt_result.keys():
        sum_result[i][:] /= cnt_result[i]
    return sum_result
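A hypothetical call to accumulate_result, with a dummy probability matrix and an assumed "validate.lst" whose first column holds the sample index (neither the file name nor the shapes come from the original project):

import numpy as np

prob = np.random.rand(4, 10)                 # 4 crops x 10 classes, dummy scores
averaged = accumulate_result("validate.lst", prob)
for idx, scores in averaged.items():
    print(idx, int(scores.argmax()))         # predicted class per original sample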
Example 7: parse_topology_file
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def parse_topology_file(self):
    topology_list = []
    if os.path.isfile(self.file_path):
        with open(self.file_path) as topology_file:
            topology_file_csv = csv.reader(topology_file)
            for row in topology_file_csv:
                # flush the accumulated pairs once their JSON size crosses the threshold
                map_size = len(json.dumps(topology_list).encode("utf-8"))
                if map_size >= BYTES_PER_FLUSH:
                    self._send_data(topology_list)
                    topology_list = []
                if topology_file_csv.line_num == 1:
                    continue  # skip the header row
                key = ""
                for index in range(len(row)):
                    if index == 0:
                        key = row[index]
                        continue
                    value1 = str(key) + "@@@@" + str(row[index])
                    value2 = str(row[index]) + "@@@@" + str(key)
                    if value1 not in topology_list:
                        topology_list.append(value1)
                    if value2 not in topology_list:
                        topology_list.append(value2)
        self._send_data(topology_list)
Example 8: _collect_data
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def _collect_data(directory):
    """Traverses directory collecting input and target files.

    Args:
      directory: base path to extracted audio and transcripts.

    Returns:
      list of (media_base, media_filepath, label) tuples
    """
    data_files = []
    transcripts = [
        filename for filename in os.listdir(directory)
        if filename.endswith(".csv")
    ]
    for transcript in transcripts:
        transcript_path = os.path.join(directory, transcript)
        with open(transcript_path, "r") as transcript_file:
            transcript_reader = csv.reader(transcript_file)
            _ = next(transcript_reader)  # Skip headers.
            for transcript_line in transcript_reader:
                media_name, label = transcript_line[0:2]
                filename = os.path.join(directory, media_name)
                data_files.append((media_name, filename, label))
    return data_files
Example 9: test_write_csv
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def test_write_csv(self):
    filename = os.path.join(sys.path[0], "test_csv_writing.csv")
    overview = {
        "this": [1, 2],
        "is": [3, 4],
        "a": [5, 6],
        "test": [7, 8]}
    Export.Export._write_csv(filename, overview)
    with open(filename, "r") as test_csv:
        reader = csv.reader(test_csv)
        test_dict = dict((header, []) for header in next(reader))
        for row in reader:
            for row_index, key in enumerate(test_dict.keys()):
                test_dict[key].append(int(row[row_index]))
    assert test_dict == overview
    os.remove(filename)
Example 10: csv_to_json
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def csv_to_json(mapping_name, csv_file):
    reader = csv.reader(csv_file)
    next(reader)  # skip header row
    mappings[mapping_name] = {}
    for row in reader:
        if row[1] != '' and row[2] != '':
            mappings[mapping_name][row[0]] = {
                'key1': row[1],
                'val1': row[2]
            }
            if len(row) > 4:
                if row[3] != '' and row[4] != '':
                    mappings[mapping_name][row[0]]['key2'] = row[3]
                    mappings[mapping_name][row[0]]['val2'] = row[4]
            if len(row) > 6:
                if row[5] != '' and row[6] != '':
                    mappings[mapping_name][row[0]]['key3'] = row[5]
                    mappings[mapping_name][row[0]]['val3'] = row[6]
Example 11: csv_to_dict_list
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def csv_to_dict_list(s):
    if s is None:
        return None
    result = []
    cols = None
    try:
        reader = csv.reader(io.StringIO(s))
        cols = next(reader)
        row = next(reader)
        while True:
            result.append({cols[i]: row[i] for i in list(range(0, len(cols)))})
            row = next(reader)
    except StopIteration:
        if cols is None:
            return None
        else:
            return result
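A small usage sketch for csv_to_dict_list, assuming the function above is in scope; the input string is invented:

sample = "name,age\nada,36\nalan,41"
print(csv_to_dict_list(sample))
# [{'name': 'ada', 'age': '36'}, {'name': 'alan', 'age': '41'}]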
Example 12: __init__
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def __init__(self, training_data_in=[], data_file=None):
    '''
    Initialize the Predictor with some training data
    The training data should be a list of [mcs, input_fraction, time]
    '''
    self.training_data = []
    self.training_data.extend(training_data_in)
    if data_file:
        # csv.reader needs a text-mode file object in Python 3, not 'rb'
        with open(data_file, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=' ')
            for row in reader:
                if row[0][0] != '#':  # skip comment lines
                    parts = row[0].split(',')
                    mc = int(parts[0])
                    scale = float(parts[1])
                    time = float(parts[2])
                    self.training_data.append([mc, scale, time])
Example 13: _basic_init
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def _basic_init(self):
    raw = rget(self._url)
    cr = csv.reader(raw.text.splitlines(), delimiter=",")
    my_list = list(cr)
    factor = float(my_list[-1][3])
    dd = {
        "date": [
            dt.datetime.strptime(my_list[i + 1][0], "%Y-%m-%d")
            for i in range(len(my_list) - 1)
        ],
        "netvalue": [
            float(my_list[i + 1][3]) / factor for i in range(len(my_list) - 1)
        ],
        "totvalue": [float(my_list[i + 1][3]) for i in range(len(my_list) - 1)],
        "comment": [0 for _ in range(len(my_list) - 1)],
    }
    index = pd.DataFrame(data=dd)
    index = index.iloc[::-1]
    index = index.reset_index(drop=True)
    self.price = index[index["date"].isin(opendate)]
    self.price = self.price[self.price["date"] <= yesterdaydash()]
    self.name = my_list[-1][2]
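The splitlines() idiom above works for any CSV payload already held in memory as a string. A minimal standalone sketch, using the requests library and a placeholder URL (both are assumptions, not part of the original project):

import csv
import requests

resp = requests.get("https://example.com/fund.csv")   # placeholder URL
for row in csv.reader(resp.text.splitlines(), delimiter=","):
    print(row)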
Example 14: import_gazetteer
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def import_gazetteer(f, limit):
    t = csv.reader(f, delimiter="\t")
    i = 0
    for row in t:
        ft = Feature()
        if Feature.objects.filter(url=row[0]).count() > 0:
            print("duplicate row " + row[0])
        else:
            ft.url = row[0]
            ft.preferred_name = row[1]
            try:
                fcode = FeatureType.objects.get(code=row[2])
            except:
                fcode = None
            ft.feature_type = fcode
            ft.admin1 = row[4]
            ft.admin2 = row[5]
            ft.geometry = Point(float(row[7]), float(row[6]))
            ft.save()
            print("saved " + ft.preferred_name)
            i += 1
        if i > limit:
            break
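A hypothetical invocation of import_gazetteer; the file name and limit are invented for illustration:

with open("gazetteer_extract.tsv", newline='') as f:
    import_gazetteer(f, limit=1000)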
Example 15: parse_geoplanet_places_csv
# Required import: import csv [as alias]
# Or: from csv import reader [as alias]
def parse_geoplanet_places_csv(csv_file):
    # text mode (not 'rb') so csv.reader receives strings in Python 3
    csv_reader = csv.reader(open(csv_file, 'r'), dialect='excel-tab', quoting=csv.QUOTE_NONE)
    for row in csv_reader:
        out_line = ['P', row[0], row[1], row[6], row[7], row[8], row[10],
                    row[18] + " 00:00:00+00", "POINT(" + row[5] + " " + row[4] + ")"]
        print("\t".join(out_line))
    return csv_file
#* WOE_ID 0- primary "place" key
#* ISO 1- ISO 3166-1 country/territory code
#* State 2- WOEID of admin state
#* County 3- WOEID of admin county
#* Local_Admin 4- WOEID of local admin
#* Country 5- WOEID of country
#* Continent 6- WOEID of continent