This article collects typical usage examples of the Python method arches.app.search.search_engine_factory.SearchEngineFactory.index_data. If you are wondering what SearchEngineFactory.index_data does or how to use it, the curated examples below may help. You can also explore other usages of the containing class arches.app.search.search_engine_factory.SearchEngineFactory.
The following presents 15 code examples of SearchEngineFactory.index_data, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
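Before the examples, here is a minimal sketch of a typical index_data call, pieced together from the call patterns that appear below. The index name, doc_type, and document contents are placeholders, and the snippet assumes a configured Arches project with a reachable Elasticsearch backend.

# Minimal usage sketch (assumed setup: a configured Arches project and a running Elasticsearch instance).
from arches.app.search.search_engine_factory import SearchEngineFactory

se = SearchEngineFactory().create()

# 'test' and 'example' are placeholder index and document-type names.
document = {
    'id': 1,
    'type': 'prefLabel',
    'value': 'example label',
}

# idfield names the key in the body whose value becomes the document id;
# refresh=True makes the document immediately searchable (handy in tests).
se.index_data(index='test', doc_type='example', body=document, idfield='id', refresh=True)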
Example 1: test_bulk_delete
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def test_bulk_delete(self):
    """
    Test bulk deleting of documents in Elasticsearch
    """
    se = SearchEngineFactory().create()
    # se.create_index(index='test')

    for i in range(10):
        x = {
            'id': i,
            'type': 'prefLabel',
            'value': 'test pref label',
        }
        se.index_data(index='test', doc_type='test', body=x, idfield='id', refresh=True)
        y = {
            'id': i + 100,
            'type': 'altLabel',
            'value': 'test alt label',
        }
        se.index_data(index='test', doc_type='test', body=y, idfield='id', refresh=True)

    query = Query(se, start=0, limit=100)
    match = Match(field='type', query='altLabel')
    query.add_query(match)
    query.delete(index='test', refresh=True)

    self.assertEqual(se.es.count(index='test', doc_type='test')['count'], 10)
Example 2: index_resources_by_type
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index_resources_by_type(resource_types, result_summary):
    """
    Collects and indexes all resources
    """
    for resource_type in resource_types:
        resources = archesmodels.Entities.objects.filter(entitytypeid=resource_type)
        print "Indexing {0} {1} resources".format(len(resources), resource_type[0])
        result_summary[resource_type[0]] = {'database': len(resources), 'indexed': 0}
        errors = []
        for resource in resources:
            try:
                resource = Resource().get(resource.entityid)
                resource.index()
            except Exception as e:
                if e not in errors:
                    errors.append(e)
        if len(errors) > 0:
            print errors[0], ':', len(errors)

    se = SearchEngineFactory().create()
    related_resource_records = archesmodels.RelatedResource.objects.all()
    for related_resource_record in related_resource_records:
        se.index_data(index='resource_relations', doc_type='all', body=model_to_dict(related_resource_record), idfield='resourcexid')

    return result_summary
Example 3: save
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def save(self):
    from arches.app.search.search_engine_factory import SearchEngineFactory
    se = SearchEngineFactory().create()

    if not self.created:
        self.created = datetime.datetime.now()
    self.modified = datetime.datetime.now()

    document = model_to_dict(self)
    se.index_data(index='resource_relations', doc_type='all', body=document, idfield='resourcexid')

    super(ResourceXResource, self).save()
Example 4: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self, scheme=None):
    if self.category == 'label':
        se = SearchEngineFactory().create()
        data = JSONSerializer().serializeToPython(self)
        if scheme == None:
            scheme = self.get_scheme_id()
        if scheme == None:
            raise Exception('Index of label failed. Index type (scheme id) could not be derived from the label.')

        data['top_concept'] = scheme.id
        se.index_data('strings', 'concept', data, 'id')
Example 5: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self, scheme=None):
    if self.category == 'label':
        se = SearchEngineFactory().create()
        data = JSONSerializer().serializeToPython(self)
        if scheme == None:
            scheme = self.get_scheme_id()
        if scheme == None:
            raise Exception('Index of label failed. Index type (scheme id) could not be derived from the label.')

        se.create_mapping('concept_labels', scheme.id, fieldname='conceptid', fieldtype='string', fieldindex='not_analyzed')
        se.index_data('concept_labels', scheme.id, data, 'id')

        # don't create terms for entity type concepts
        if not(scheme.id == '00000000-0000-0000-0000-000000000003' or scheme.id == '00000000-0000-0000-0000-000000000004'):
            se.index_term(self.value, self.id, scheme.id, {'conceptid': self.conceptid})
Example 6: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self):
    """
    Indexes all the necessary item values of a resource to support search
    """
    if unicode(self.graph_id) != unicode(settings.SYSTEM_SETTINGS_RESOURCE_MODEL_ID):
        se = SearchEngineFactory().create()
        datatype_factory = DataTypeFactory()
        node_datatypes = {str(nodeid): datatype for nodeid, datatype in models.Node.objects.values_list('nodeid', 'datatype')}
        document, terms = self.get_documents_to_index(datatype_factory=datatype_factory, node_datatypes=node_datatypes)
        document['root_ontology_class'] = self.get_root_ontology()

        se.index_data('resource', self.graph_id, JSONSerializer().serializeToPython(document), id=self.pk)
        for term in terms:
            se.index_data('strings', 'term', term['_source'], id=term['_id'])
Example 7: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self):
    """
    Indexes all the necessary item values of a resource to support search
    """
    se = SearchEngineFactory().create()
    datatype_factory = DataTypeFactory()
    node_datatypes = {str(nodeid): datatype for nodeid, datatype in models.Node.objects.values_list('nodeid', 'datatype')}
    document, terms = self.get_documents_to_index(datatype_factory=datatype_factory, node_datatypes=node_datatypes)

    se.index_data('resource', self.graph_id, JSONSerializer().serializeToPython(document), id=self.pk)
    for term in terms:
        se.index_data('strings', 'term', term['_source'], id=term['_id'])
Example 8: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self, scheme=None):
    if self.category == 'label':
        se = SearchEngineFactory().create()
        data = JSONSerializer().serializeToPython(self)
        if scheme == None:
            scheme = self.get_scheme_id()
        if scheme == None:
            raise Exception('Index of label failed. Index type (scheme id) could not be derived from the label.')

        se.create_mapping('concept_labels', scheme.id, fieldname='conceptid', fieldtype='string', fieldindex='not_analyzed')
        se.index_data('concept_labels', scheme.id, data, 'id')

        # Look up whether the label is actually a dropdown label or an entity label and, if so, exclude it from the term search index.
        entity_or_dropdown = archesmodels.ConceptRelations.objects.filter(Q(relationtype='hasCollection') | Q(relationtype='hasEntity'), conceptidto=scheme.id)
        is_entity_or_dropdown = False if entity_or_dropdown.count() == 0 else True

        # don't create terms for entity type concepts
        if not(scheme.id == '00000000-0000-0000-0000-000000000003' or scheme.id == '00000000-0000-0000-0000-000000000004') and is_entity_or_dropdown == False:
            se.index_term(self.value, self.id, scheme.id, {'conceptid': self.conceptid})
Example 9: update
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def update(self, data, files):
    se = SearchEngineFactory().create()
    related_resources_data = data.get('related-resources', [])
    original_relations = self.resource.get_related_resources()

    if self.resource.entityid == '':
        self.resource.save()

    relationship_ids = []
    for related_resource in related_resources_data:
        relationship_id = related_resource['relationship']['resourcexid']
        relationship_ids.append(relationship_id)
        resource_id = related_resource['relatedresourceid']
        relationship_type_id = related_resource['relationship']['relationshiptype']
        if isinstance(relationship_type_id, dict):
            relationship_type_id = relationship_type_id['value']
        notes = related_resource['relationship']['notes']
        date_started = related_resource['relationship']['datestarted']
        date_ended = related_resource['relationship']['dateended']

        if not relationship_id:
            relationship = self.resource.create_resource_relationship(resource_id,
                relationship_type_id=relationship_type_id,
                notes=notes,
                date_started=date_started,
                date_ended=date_ended
            )
        else:
            relationship = RelatedResource.objects.get(pk=relationship_id)
            relationship.relationshiptype = relationship_type_id
            relationship.notes = notes
            relationship.datestarted = date_started
            relationship.dateended = date_ended
            relationship.save()
            se.delete(index='resource_relations', doc_type='all', id=relationship_id)
            se.index_data(index='resource_relations', doc_type='all', body=model_to_dict(relationship), idfield='resourcexid')

    for relatedentity in original_relations:
        if relatedentity['relationship'].resourcexid not in relationship_ids:
            se.delete(index='resource_relations', doc_type='all', id=relatedentity['relationship'].resourcexid)
            relatedentity['relationship'].delete()
Example 10: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self, documents, index, type, idfield, processdoc=None, getid=None, bulk=False):
    detail = ''
    bulkitems = []
    errorlist = []
    se = SearchEngineFactory().create()

    if not isinstance(documents, list):
        documents = [documents]

    for document in documents:
        # print "inserting document: %s" % (document)
        sys.stdout.write('.')
        if processdoc == None:
            data = document
        else:
            data = processdoc(document)
        id = None
        if getid != None:
            id = getid(document, data)

        try:
            if bulk:
                bulkitem = se.create_bulk_item(index, type, id, data)
                bulkitems.append(bulkitem[0])
                bulkitems.append(bulkitem[1])
            else:
                se.index_data(index, type, data, idfield=idfield, id=id)
        except Exception as detail:
            errorlist.append(id)

    if bulk:
        try:
            se.bulk_index(index, type, bulkitems)
        except Exception as detail:
            errorlist = bulkitems
            print 'bulk insert failed'

    if detail != '':
        print "\n\nException detail: %s " % (detail)
        print "There was a problem indexing the following items:"
        print errorlist
Example 11: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(self):
    """
    Indexes all the necessary documents related to resources to support the map, search, and reports
    """
    se = SearchEngineFactory().create()
    search_documents = self.prepare_documents_for_search_index()

    for document in search_documents:
        se.index_data('entity', self.entitytypeid, document, id=self.entityid)

        report_documents = self.prepare_documents_for_report_index(geom_entities=document['geometries'])
        for report_document in report_documents:
            se.index_data('resource', self.entitytypeid, report_document, id=self.entityid)

        geojson_documents = self.prepare_documents_for_map_index(geom_entities=document['geometries'])
        for geojson in geojson_documents:
            se.index_data('maplayers', self.entitytypeid, geojson, idfield='id')

    for term in self.prepare_terms_for_search_index():
        se.index_term(term['term'], term['entityid'], term['context'], term['options'])
Example 12: index
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
def index(documents, index, type, idfield, processdoc=None, getid=None, bulk=False):
    print 'index_concepts.index'
    detail = ''
    bulkitems = []
    errorlist = []
    se = SearchEngineFactory().create()

    if not isinstance(documents, list):
        documents = [documents]

    for document in documents:
        sys.stdout.write('.')
        if processdoc == None:
            data = document
        else:
            data = processdoc(document)
        id = None
        if getid != None:
            id = getid(document, data)

        try:
            if bulk:
                bulkitem = se.create_bulk_item(index, type, id, data)
                bulkitems.append(bulkitem[0])
                bulkitems.append(bulkitem[1])
            else:
                se.index_data(index, type, data, idfield=idfield, id=id)
                # se.index_data('concept_labels', '00000000-0000-0000-0000-000000000005', data, 'id')

                for concept in data['labels']:
                    # se.index_term(concept['label'], concept['labelid'], '00000000-0000-0000-0000-000000000005', settings.PUBLISHED_LABEL, {'conceptid': data['conceptid']})
                    if concept['label'].strip(' \t\n\r') != '':
                        already_indexed = False
                        count = 1
                        ids = [id]

                        try:
                            _id = uuid.uuid3(uuid.NAMESPACE_DNS, '%s%s' % (hash(concept['label']), hash(data['conceptid'])))
                            result = se.es.get(index='term', doc_type='value', id=_id, ignore=404)
                            # print 'result: %s' % result
                            if result['found'] == True:
                                ids = result['_source']['ids']
                                if id not in ids:
                                    ids.append(id)
                            else:
                                ids = [id]

                            if data['context'] != '00000000-0000-0000-0000-000000000003' and data['context'] != '00000000-0000-0000-0000-000000000004':
                                se.index_data('term', 'value', {'term': concept['label'], 'context': data['context'], 'ewstatus': settings.PUBLISHED_LABEL, 'options': {'conceptid': data['conceptid']}, 'count': len(ids), 'ids': ids}, id=_id)
                        except Exception as detail:
                            raise detail
        except Exception as detail:
            print detail
            errorlist.append(id)

    if bulk:
        try:
            se.bulk_index(index, type, bulkitems)
        except Exception as detail:
            errorlist = bulkitems
            print 'bulk insert failed'

    if detail != '':
        print "\n\nException detail: %s " % (detail)
        print "There was a problem indexing the following items:"
        print errorlist
Example 13: ResourceLoader
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
# ......... some code omitted here .........
            if archesjson == False:
                masterGraph = None
                if current_entitiy_type != resource.entitytypeid:
                    schema = Resource.get_mapping_schema(resource.entitytypeid)

                master_graph = self.build_master_graph(resource, schema)
                self.pre_save(master_graph)

                try:
                    uuid.UUID(resource.resource_id)
                    entityid = resource.resource_id
                except(ValueError):
                    entityid = ''

                master_graph.save(user=self.user, note=load_id, resource_uuid=entityid)
                master_graph.index()
                resource.entityid = master_graph.entityid
                legacyid_to_entityid[resource.resource_id] = master_graph.entityid
            else:
                new_resource = Resource(resource)
                new_resource.save(user=self.user, note=load_id, resource_uuid=new_resource.entityid)
                try:
                    new_resource.index()
                except:
                    print 'Could not index resource. This may be because the valueid of a concept is not in the database.'
                legacyid_to_entityid[new_resource.entityid] = new_resource.entityid

            ret['successfully_saved'] += 1

        ret['legacyid_to_entityid'] = legacyid_to_entityid
        elapsed = (time() - start)
        print len(resource_list), 'resources loaded'
        if len(resource_list) > 0:
            print 'total time to etl = %s' % (elapsed)
            print 'average time per entity = %s' % (elapsed / len(resource_list))
            print 'Load Identifier =', load_id
            print '***You can reverse this load with the following command:'
            print 'python manage.py packages -o remove_resources --load_id', load_id

        return ret

    def build_master_graph(self, resource, schema):
        master_graph = None
        entity_data = []

        if len(entity_data) > 0:
            master_graph = entity_data[0]
            for mapping in entity_data[1:]:
                master_graph.merge(mapping)

        for group in resource.groups:
            entity_data2 = []
            for row in group.rows:
                entity = Resource()
                entity.create_from_mapping(row.resourcetype, schema[row.attributename]['steps'], row.attributename, row.attributevalue)
                entity_data2.append(entity)

            mapping_graph = entity_data2[0]
            for mapping in entity_data2[1:]:
                mapping_graph.merge(mapping)

            if master_graph == None:
                master_graph = mapping_graph
            else:
                node_type_to_merge_at = schema[row.attributename]['mergenodeid']
                master_graph.merge_at(mapping_graph, node_type_to_merge_at)

        return master_graph

    def pre_save(self, master_graph):
        pass

    def relate_resources(self, relationship, legacyid_to_entityid, archesjson):
        start_date = None if relationship['START_DATE'] in ('', 'None') else relationship['START_DATE']
        end_date = None if relationship['END_DATE'] in ('', 'None') else relationship['END_DATE']

        if archesjson == False:
            relationshiptype_concept = Concepts.objects.get(legacyoid=relationship['RELATION_TYPE'])
            concept_value = Values.objects.filter(conceptid=relationshiptype_concept.conceptid).filter(valuetype='prefLabel')
            entityid1 = legacyid_to_entityid[relationship['RESOURCEID_FROM']]
            entityid2 = legacyid_to_entityid[relationship['RESOURCEID_TO']]
        else:
            concept_value = Values.objects.filter(valueid=relationship['RELATION_TYPE'])
            entityid1 = relationship['RESOURCEID_FROM']
            entityid2 = relationship['RESOURCEID_TO']

        related_resource_record = ResourceXResource(
            entityid1=entityid1,
            entityid2=entityid2,
            notes=relationship['NOTES'],
            relationshiptype=concept_value[0].valueid,
            datestarted=start_date,
            dateended=end_date,
        )
        related_resource_record.save()
        self.se.index_data(index='resource_relations', doc_type='all', body=model_to_dict(related_resource_record), idfield='resourcexid')
Example 14: ResourceLoader
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
# ......... some code omitted here .........
                try:
                    uuid.UUID(resource.resource_id)
                    entityid = resource.resource_id
                except (ValueError):
                    entityid = ""

                master_graph.save(user=self.user, note=load_id, resource_uuid=entityid)
                master_graph.index()
                resource.entityid = master_graph.entityid
                legacyid_to_entityid[resource.resource_id] = master_graph.entityid
            else:
                new_resource = Resource(resource)
                new_resource.save(user=self.user, note=load_id, resource_uuid=new_resource.entityid)
                try:
                    new_resource.index()
                except:
                    print "Could not index resource. This may be because the valueid of a concept is not in the database."
                legacyid_to_entityid[new_resource.entityid] = new_resource.entityid

            ret["successfully_saved"] += 1

        ret["legacyid_to_entityid"] = legacyid_to_entityid
        elapsed = time() - start
        print len(resource_list), "resources loaded"
        if len(resource_list) > 0:
            print "total time to etl = %s" % (elapsed)
            print "average time per entity = %s" % (elapsed / len(resource_list))
            print "Load Identifier =", load_id
            print "***You can reverse this load with the following command:"
            print "python manage.py packages -o remove_resources --load_id", load_id

        return ret

    def build_master_graph(self, resource, schema):
        master_graph = None
        entity_data = []

        if len(entity_data) > 0:
            master_graph = entity_data[0]
            for mapping in entity_data[1:]:
                master_graph.merge(mapping)

        for group in resource.groups:
            entity_data2 = []
            for row in group.rows:
                entity = Resource()
                entity.create_from_mapping(
                    row.resourcetype, schema[row.attributename]["steps"], row.attributename, row.attributevalue
                )
                entity_data2.append(entity)

            mapping_graph = entity_data2[0]
            for mapping in entity_data2[1:]:
                mapping_graph.merge(mapping)

            if master_graph == None:
                master_graph = mapping_graph
            else:
                node_type_to_merge_at = schema[row.attributename]["mergenodeid"]
                master_graph.merge_at(mapping_graph, node_type_to_merge_at)

        return master_graph

    def pre_save(self, master_graph):
        pass

    def relate_resources(self, relationship, legacyid_to_entityid, archesjson):
        start_date = None if relationship["START_DATE"] in ("", "None") else relationship["START_DATE"]
        end_date = None if relationship["END_DATE"] in ("", "None") else relationship["END_DATE"]

        if archesjson == False:
            relationshiptype_concept = Concept.objects.get(legacyoid=relationship["RELATION_TYPE"])
            concept_value = Value.objects.filter(concept=relationshiptype_concept.conceptid).filter(
                valuetype="prefLabel"
            )
            entityid1 = legacyid_to_entityid[relationship["RESOURCEID_FROM"]]
            entityid2 = legacyid_to_entityid[relationship["RESOURCEID_TO"]]
        else:
            concept_value = Value.objects.filter(valueid=relationship["RELATION_TYPE"])
            entityid1 = relationship["RESOURCEID_FROM"]
            entityid2 = relationship["RESOURCEID_TO"]

        related_resource_record = ResourceXResource(
            entityid1=entityid1,
            entityid2=entityid2,
            notes=relationship["NOTES"],
            relationshiptype=concept_value[0].valueid,
            datestarted=start_date,
            dateended=end_date,
        )
        related_resource_record.save()
        self.se.index_data(
            index="resource_relations",
            doc_type="all",
            body=model_to_dict(related_resource_record),
            idfield="resourcexid",
        )
Example 15: main
# Required import: from arches.app.search.search_engine_factory import SearchEngineFactory [as alias]
# Or: from arches.app.search.search_engine_factory.SearchEngineFactory import index_data [as alias]
# ......... some code omitted here .........
            END IF;

            -- Supports RDM where no conceptid or legacyoid is fed in
            IF NEW.CONCEPTID IS NULL AND (NEW.LEGACYOID IS NULL OR NEW.LEGACYOID = '') THEN
                NEW.LEGACYOID = v_uuid::text;
            END IF;

            -- I would assume that the two cases below are handled in python code by being explicit about insert values for both columns... just coding defensively here. ABL.
            -- Supports where the ETL-provided conceptid is a UUID and will be kept, but no LEGACYOID is provided.
            IF NEW.CONCEPTID IS NOT NULL AND (NEW.LEGACYOID IS NULL OR NEW.LEGACYOID = '') THEN
                NEW.LEGACYOID = NEW.CONCEPTID::text;
            END IF;

            -- Supports where the ETL'ed conceptid is not a UUID. Populates the original "conceptid" as LEGACYOID.
            IF NEW.LEGACYOID IS NOT NULL OR NEW.LEGACYOID != '' THEN
                NEW.LEGACYOID = NEW.LEGACYOID;
            END IF;

            RETURN NEW;
        END$BODY$
        LANGUAGE plpgsql VOLATILE
        COST 100;
        ALTER FUNCTION concepts.concpets_ins()
            OWNER TO postgres;

        -- Trigger: concepts_ins_tgr on concepts.concepts
        DROP TRIGGER IF EXISTS concepts_ins_tgr ON concepts.concepts;
        CREATE TRIGGER concepts_ins_tgr
            BEFORE INSERT
            ON concepts.concepts
            FOR EACH ROW
            EXECUTE PROCEDURE concepts.concpets_ins();"""

    with transaction.atomic():
        # import arches.management.patches.upgrade_to_v3_0_4
        cursor = connection.cursor()
        cursor.execute(sql)

        anonymous_user, created = User.objects.get_or_create(username='anonymous')
        if created:
            anonymous_user.set_password('')
        read_group, created = Group.objects.get_or_create(name='read')
        anonymous_user.groups.add(read_group)
        edit_group, created = Group.objects.get_or_create(name='edit')
        admin_user = User.objects.get(username='admin')
        admin_user.groups.add(edit_group)
        admin_user.groups.add(read_group)

        print '\nINSTALLING PYSHP MODULE'
        print '-----------------------'
        pip.main(['install', 'pyshp'])

        print '\nUPDATING ENTITY INDEX'
        print '---------------------'
        # Add a numbers array to resources that do not have one. Move numbers data from child_entities to the numbers array in the index.
        resourceid_sql = "SELECT entityid FROM data.entities WHERE entitytypeid IN (SELECT distinct(entitytypeid) FROM data.entity_types WHERE isresource = True);"
        cursor.execute(resourceid_sql)
        resourceids = []
        for val in cursor.fetchall():
            resourceids.append(val[0])

        start = time.time()
        records = 0
        se = SearchEngineFactory().create()
        for resourceid in resourceids:
            indexed_resource = se.search(index='entity', id=resourceid)
            if 'numbers' not in indexed_resource['_source']:
                indexed_resource['_source']['numbers'] = []

            for child_entity in indexed_resource['_source']['child_entities']:
                if child_entity['businesstablename'] == 'numbers':
                    indexed_resource['_source']['numbers'].append(child_entity)
                    indexed_resource['_source']['child_entities'].remove(child_entity)

            # Reindex the resource here.
            se.index_data(index='entity', doc_type=indexed_resource['_type'], body=indexed_resource['_source'], id=indexed_resource['_id'])
            records += 1
            # if records % 500 == 0:
            #     print '%s records processed' % str(records)

        print '%s records updated' % str(records)
        # print 'Patch took %s seconds to run.' % str(time.time() - start)

    print "\npatch '%s' successfully applied." % __name__