This article collects typical usage examples of the tika.parser.from_file function in Python. If you have been wondering what exactly from_file does and how to use it, the hand-picked code examples here should help.
Fifteen code examples of from_file are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
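Every example below centers on the same call: tika-python's parser.from_file sends a document to a running Apache Tika server and returns a plain dict whose 'content' key holds the extracted text (possibly None) and whose 'metadata' key holds the detected metadata. A minimal sketch of that round trip (the file path is hypothetical):

from tika import parser

parsed = parser.from_file('/tmp/sample.pdf')   # hypothetical input file
print parsed['metadata']['Content-Type']       # e.g. 'application/pdf'
print (parsed['content'] or '')[:200]          # first 200 chars of extracted text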
Example 1: computeScores
import ast
import csv
import itertools
from time import sleep

from requests.exceptions import ConnectionError
from tika import parser

def computeScores(inputDir, outCSV, acceptTypes):
    # filterFiles and Vector are helpers defined elsewhere in this project
    with open(outCSV, "wb") as outF:
        a = csv.writer(outF, delimiter=',')
        a.writerow(["x-coordinate", "y-coordinate", "Similarity_score"])

        files_tuple = itertools.combinations(filterFiles(inputDir, acceptTypes), 2)
        for file1, file2 in files_tuple:
            try:
                row_cosine_distance = [file1, file2]

                file1_parsedData = parser.from_file(file1)
                file2_parsedData = parser.from_file(file2)

                v1 = Vector(file1, ast.literal_eval(file1_parsedData["content"]))
                v2 = Vector(file2, ast.literal_eval(file2_parsedData["content"]))

                row_cosine_distance.append(v1.cosTheta(v2))
                a.writerow(row_cosine_distance)
            except ConnectionError:
                sleep(1)  # Tika server unavailable; back off briefly
            except KeyError:
                continue  # file produced no usable content
            except Exception, e:
                pass
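A hypothetical invocation of computeScores above; the directory, output file, and extension list are made up for illustration:

computeScores('/data/polar-dump', 'cosine_similarity.csv', ['pdf', 'html'])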
Example 2: run_exit_tool_on_known_type
def run_exit_tool_on_known_type(dir_list):
    # parse every file once for its side effects; get_file_list is a project helper
    file_list = get_file_list(dir_list)
    for entry in file_list:
        parser.from_file(entry)
    return
Example 3: computeScores
def computeScores(inputDir, outCSV, acceptTypes, allKeys):
    na_metadata = ["resourceName"]
    with open(outCSV, "wb") as outF:
        a = csv.writer(outF, delimiter=',')
        a.writerow(["x-coordinate", "y-coordinate", "Similarity_score"])

        filename_list = []
        for root, dirnames, files in os.walk(inputDir):
            dirnames[:] = [d for d in dirnames if not d.startswith('.')]
            for filename in files:
                if not filename.startswith('.'):
                    filename_list.append(os.path.join(root, filename))

        filename_list = [filename for filename in filename_list if parser.from_file(filename)]
        if acceptTypes:
            filename_list = [filename for filename in filename_list if str(parser.from_file(filename)['metadata']['Content-Type'].encode('utf-8')).split('/')[-1] in acceptTypes]
        else:
            print "Accepting all MIME Types....."

        files_tuple = itertools.combinations(filename_list, 2)
        for file1, file2 in files_tuple:
            row_edit_distance = [file1, file2]

            file1_parsedData = parser.from_file(file1)
            file2_parsedData = parser.from_file(file2)

            intersect_features = set(file1_parsedData["metadata"].keys()) & set(file2_parsedData["metadata"].keys())
            intersect_features = [feature for feature in intersect_features if feature not in na_metadata]

            file_edit_distance = 0.0
            for feature in intersect_features:
                file1_feature_value = stringify(file1_parsedData["metadata"][feature])
                file2_feature_value = stringify(file2_parsedData["metadata"][feature])
                feature_distance = float(editdistance.eval(file1_feature_value, file2_feature_value)) / max(len(file1_feature_value), len(file2_feature_value))
                file_edit_distance += feature_distance

            if allKeys:
                file1_only_features = set(file1_parsedData["metadata"].keys()) - set(intersect_features)
                file1_only_features = [feature for feature in file1_only_features if feature not in na_metadata]

                file2_only_features = set(file2_parsedData["metadata"].keys()) - set(intersect_features)
                file2_only_features = [feature for feature in file2_only_features if feature not in na_metadata]

                file_edit_distance += len(file1_only_features) + len(file2_only_features)
                file_edit_distance /= float(len(intersect_features) + len(file1_only_features) + len(file2_only_features))
            else:
                file_edit_distance /= float(len(intersect_features))  # average edit distance

            row_edit_distance.append(1 - file_edit_distance)
            a.writerow(row_edit_distance)
Example 4: command
def command(in_dir, out_dir, tika_server):
    create_dirs(out_dir)

    in_files = get_files(in_dir)
    for fi in in_files:
        if tika_server:
            parsed = parser.from_file(fi, tika_server)
        else:
            parsed = parser.from_file(fi)

        out_file = out_file_name(out_dir, fi, 'txt')
        with codecs.open(out_file, 'wb', encoding='utf-8') as f:
            f.write(parsed['content'])
Example 5: intersect
def intersect(json_filename, output_name, index_file, start_index=0, end_index=yaoner.MAX_INT_VALUE):
    base_directory = '/Users/Frank/Desktop/fulldump/raw-dataset/'
    if index_file is None:
        index_file = '/Users/Frank/PycharmProjects/599assignment1/geo-topic-parser-folder/geo-topic-all-files.txt'

    with open(json_filename) as json_file:
        json_data = json.load(json_file)

    concept_dictionary = dict()
    for key in json_data.keys():
        concept_dictionary[key.lower()] = {}

    file_list = yaoner.read_index_file(index_file, base_directory, start_index, end_index)
    for idx, val in enumerate(file_list):
        print(start_index + idx)
        parsed = parser.from_file(''.join([base_directory, val]))
        if 'content' in parsed and parsed['content'] is not None:
            content = parsed['content']
            words = content.split()
            for word in words:
                lowercased = word.lower()
                if lowercased in concept_dictionary:
                    last_part = os.path.basename(val)
                    concept_dictionary[lowercased][last_part] = 1

    dump(concept_dictionary, output_name + 'from' + str(start_index) + 'to' + str(end_index) + '.json')
    return
Example 6: extract
def extract(path):
    parsed = parser.from_file(path)
    content = parsed["content"]

    ners = StanfordExtractor(content).extract()
    entities = CustomEntityExtractor(content).extract()
    quantities = QuantityExtractor(content).getQuantities()

    if len(ners['LOCATION']) > 0:
        l = GeoTopic(map(lambda l: l['name'], ners['LOCATION']))
        geo = l.getInfo()
        locations = l.getLocations()
    else:
        geo = []
        locations = []

    return {
        'geo': geo,
        'locations': locations,
        'entities': entities['entities'],
        'places': ners['LOCATION'],
        'dates': ners['DATE'],
        'quantities': quantities,
        'metadata': parsed['metadata'],
        'mime-type': parsed['metadata']['Content-Type'],
        'id': idf.set(path)
    }
Example 7: run_ner
def run_ner(start_index=0, end_index=MAX_INT_VALUE):
    index_file = '/Users/Frank/PycharmProjects/599assignment1/geo-topic-parser-folder/geo-topic-all-files.txt'
    base_directory = '/Users/Frank/Desktop/fulldump/raw-dataset/'
    file_list = read_index_file(index_file, base_directory, start_index, end_index)

    measurement_list = []
    index = 0 + start_index
    for entry in file_list:
        print(index)
        parsed = parser.from_file(''.join([base_directory, entry]))

        if 'metadata' in parsed:
            if 'X-TIKA:EXCEPTION:embedded_exception' in parsed['metadata']:
                index += 1
                continue

        if 'content' in parsed:
            if parsed['content'] is not None:
                # skip anything larger than 1 MB of extracted text
                if len(parsed['content']) > 1 * 1024 * 1024:
                    index += 1
                    continue
                measurements = extract_measurement(parsed['content'])
                if measurements is not None and len(measurements) > 0:
                    measurement_list.append({entry.split('/')[-1]: measurements})
        index += 1

    dump_to_json(measurement_list, '/Users/Frank/working-directory/ner-measurement-mentions/',
                 'from' + str(start_index) + 'to' + str(end_index))
    return
Example 8: filterFiles
def filterFiles(inputDir, acceptTypes):
    filename_list = []
    for root, dirnames, files in os.walk(inputDir):
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for filename in files:
            if not filename.startswith('.'):
                filename_list.append(os.path.join(root, filename))

    # keep only files Tika can parse, then filter on MIME subtype if requested
    filename_list = [filename for filename in filename_list if parser.from_file(filename)]
    if acceptTypes:
        filename_list = [filename for filename in filename_list if str(parser.from_file(filename)['metadata']['Content-Type'].encode('utf-8')).split('/')[-1] in acceptTypes]
    else:
        print "Accepting all MIME Types....."

    return filename_list
Developer: harshfatepuria, Project: Scientific-Content-Enrichment-in-the-Text-Retrieval-Conference-TREC-Polar-Dynamic-Domain-Dataset, Lines: 16, Source: kmeans_ext.py
Example 9: compareValueSimilarity
def compareValueSimilarity(fileDir, encoding='utf-8'):
    union_feature_names = set()
    file_parsed_data = {}
    resemblance_scores = {}
    file_metadata = {}

    for filename in fileDir:
        file_parsed = []
        parsedData = parser.from_file(filename)
        file_metadata[filename] = parsedData["metadata"]

        for key in parsedData["metadata"].keys():
            value = parsedData["metadata"].get(key)[0]
            if isinstance(value, list):
                # flatten a nested list value into a single string
                value = "".join(parsedData["metadata"].get(key)[0])
            file_parsed.append(str(key.strip(' ').encode(encoding) + ": " + value.strip(' ').encode(encoding)))

        file_parsed_data[filename] = set(file_parsed)
        union_feature_names = union_feature_names | file_parsed_data[filename]

    total_num_features = len(union_feature_names)
    for filename in file_parsed_data.keys():
        overlap = file_parsed_data[filename] & union_feature_names
        resemblance_scores[filename] = float(len(overlap)) / total_num_features

    sorted_resemblance_scores = sorted(resemblance_scores.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_resemblance_scores, file_metadata
Example 10: load_topics
def load_topics(filename):
    languages.append(language.from_file(filename))

    parser_obj = parser.from_file(filename)
    if 'content' in parser_obj and parser_obj['content']:
        words.extend(get_nouns(parser_obj['content']))

    if 'metadata' in parser_obj:
        metadata_dict = parser_obj['metadata']
        if 'Author' in metadata_dict:
            if isinstance(metadata_dict['Author'], list):
                metadata.append(metadata_dict['Author'][0])
            else:
                metadata.append(metadata_dict['Author'])
        if 'xmp:CreatorTool' in metadata_dict:
            if isinstance(metadata_dict['xmp:CreatorTool'], list):
                metadata.extend(metadata_dict['xmp:CreatorTool'])
            else:
                metadata.append(metadata_dict['xmp:CreatorTool'])
        if 'Content-Type' in metadata_dict:
            if isinstance(metadata_dict['Content-Type'], list):
                metadata.append(metadata_dict['Content-Type'][0])
            else:
                metadata.append(metadata_dict['Content-Type'])
        if 'Company' in metadata_dict:
            if isinstance(metadata_dict['Company'], list):
                metadata.append(metadata_dict['Company'][0])
            else:
                metadata.append(metadata_dict['Company'])
Example 11: __init__
def __init__(self, fileName):
    parsed = parser.from_file(fileName)
    metadata = parsed["metadata"]

    # strip newlines, tabs, quotes and any remaining non-word characters
    content = parsed["content"]
    content = content.replace('\n', '')
    content = content.replace('\t', '')
    content = content.replace('\'', '')
    content = content.replace('\"', '')
    rx = re.compile('\W+')
    content = rx.sub(' ', content).strip()
    self.content = content

    # Title: fall back to 'Untitled' when the metadata carries none
    try:
        title = metadata['title']
    except KeyError:
        title = 'Untitled'
    title = title.replace('\n', '')
    title = title.replace('\t', '')
    title = title.replace('\'', '')
    title = title.replace('\"', '')
    title = rx.sub(' ', title).strip()
    self.title = title

    self.lang = language.from_file(fileName)
Example 12: getKeywords
def getKeywords(pdfFile, Occur):
    tikaurl = tika_obo.getTikaAddress()
    parsed = parser.from_file(pdfFile, tikaurl)

    metadata = parsed["metadata"]
    doccontent = parsed["content"]

    fullwordlist = obo.stripNonAlphaNum(doccontent)
    wordlist = obo.removeStopwords(fullwordlist, obo.stopwords)
    dictionary = obo.wordListToFreqDict(wordlist)
    sorteddict = obo.sortFreqDict(dictionary)

    count = 0
    keywords = []
    shortkey = []
    maxoccur = Occur
    for s in sorteddict:
        numocc = int(s[0])
        word = s[1].encode('utf-8')
        if numocc > maxoccur:
            keyword = {word: str(numocc)}
            keywords.append(keyword)
            if len(word) > 6:
                shortkey.append(word.lower())
        count = count + 1

    if Occur > 0:
        return shortkey
    return keywords
Example 13: main
def main(file_name):
    fi = open("sentences.txt", "w+")
    fi_summary = open("summary.txt", "w+")
    fi_cool = open("wtv.txt", "w+")

    score_sentences = SentenceScores()
    parsed = parser.from_file(file_name)
    print parsed["metadata"]

    content = parsed["content"]
    content = content.strip()
    fi_cool.write(content.encode("utf-8"))

    sentences = content.split(". ")
    sentences = map(clean_sentence, sentences)
    lines = score_sentences.get_summary_lines(sentences)

    max_len = len(lines) / 3
    needed_lines = lines[0:max_len]
    sorted_lines = sorted(needed_lines, key=lambda x: x[0])

    for line_num, score in sorted_lines:
        fi_summary.write((str(line_num + 1) + ", " + sentences[line_num]).encode("utf-8"))

    for sentence in sentences:
        fi.write(sentence.encode("utf-8"))

    fi.close()
    fi_summary.close()
Example 14: getTikaTags
def getTikaTags(filename):
    import tika
    from tika import parser
    import obo
    import tika_obo
    import gethavens

    tikaUrl = getTikaAddress()
    parsed = parser.from_file(filename, tikaUrl)
    metadata = parsed["metadata"]
    content = parsed["content"]

    jsonprops = {'cm:title': str(metadata['resourceName'])}
    for key in metadata:
        newkey = str(key)
        value = str(metadata[key])
        jsonprops[newkey] = value

    title = jsonprops['resourceName']
    namebreak = title.split('.')

    havenrecord = gethavens.getPropertiesHaven(str(jsonprops['resourceName']))
    jsonprops['Description'] = 'Ranked:' + str(havenrecord['rank']) \
        + ' most secretive Tax Haven\nhttps://www.google.co.uk/maps/place/' \
        + havenrecord['country']
    jsonprops['Name'] = havenrecord['country']
    jsonprops['cmis:title'] = str(title)
    jsonprops['cmis:author'] = 'admin'
    return jsonprops
Example 15: _request_pdf_data
def _request_pdf_data(self, url):
    parsed = parser.from_file(url)
    return {
        'url': url,
        'title': self._parse_pdf_title(parsed),
        'body': self._parse_pdf_body(parsed)
    }
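The _parse_pdf_title and _parse_pdf_body helpers are not shown in this snippet. A plausible sketch, inferred from the call sites above and the metadata/content pattern used throughout these examples (not the project's actual code):

def _parse_pdf_title(self, parsed):
    # assumed helper: fall back to 'Untitled' when the PDF has no title metadata
    return parsed['metadata'].get('title', 'Untitled')

def _parse_pdf_body(self, parsed):
    # assumed helper: guard against None content before stripping
    return (parsed['content'] or '').strip()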