This page collects typical, hand-picked code examples for the Python method dynamic_scraper.models.SchedulerRuntime.save. If you are wondering what SchedulerRuntime.save does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore the containing class, dynamic_scraper.models.SchedulerRuntime, in more depth.
The 15 code examples of SchedulerRuntime.save shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
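Before the individual examples, here is a minimal sketch of the pattern they all share: create and save a SchedulerRuntime, then attach it to a scraped object through its checker_runtime foreign key so django-dynamic-scraper can schedule checker runs for that object. The Event model is an assumption borrowed from the test examples below, not part of the SchedulerRuntime API.

from dynamic_scraper.models import SchedulerRuntime

# Saving the runtime record assigns it a primary key, so it can be
# referenced from a scraped object's checker_runtime foreign key.
checker_rt = SchedulerRuntime(runtime_type='C')  # 'C' marks a checker runtime
checker_rt.save()

# Event is an assumed example model with a checker_runtime ForeignKey,
# as in the test cases further down this page.
event = Event(title=u'Event 1', checker_runtime=checker_rt)
event.save()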
Example 1: test_double_standard_id_field
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def test_double_standard_id_field(self):
    checker_rt = SchedulerRuntime()
    checker_rt.save()
    event = Event(title=u'Event 1', event_website=self.event_website,
        description=u'Event 1 description',
        url=u'http://localhost:8010/static/site_generic/event5.html',
        checker_runtime=checker_rt)
    event.save()
    event = Event(title=u'Event 2', event_website=self.event_website,
        description=u'Event 1 description',
        url=u'http://localhost:8010/static/site_generic/event6.html',
        checker_runtime=checker_rt)
    event.save()
    event = Event(title=u'Event 1', event_website=self.event_website,
        description=u'Event 2 description',
        url=u'http://localhost:8010/static/site_generic/event7.html',
        checker_runtime=checker_rt)
    event.save()
    self.soa_url.id_field = False
    self.soa_url.save()
    self.soa_title.id_field = True
    self.soa_title.save()
    self.soa_desc.id_field = True
    self.soa_desc.save()
    self.run_event_spider(1)
    self.assertEqual(len(Event.objects.all()), 6)
    self.assertEqual(Event.objects.filter(description='Event 1 description').count(), 2)
Example 2: process_item
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def process_item(self, item, spider):
    if spider.conf['DO_ACTION']:  # Necessary since DDS v.0.9+
        try:
            item['races_website'] = spider.ref_object
            checker_rt = SchedulerRuntime(runtime_type='C')
            checker_rt.save()
            item['checker_runtime'] = checker_rt
            item['date'] = self.process_date(item['date'], spider)
            item['city'] = self.process_city(item['city'], spider)
            item['province'] = self.process_province(item['province'], spider)
            item.save()
            spider.action_successful = True
            spider.log("Item saved.", logging.INFO)
        except IntegrityError as e:
            spider.log(str(e), logging.ERROR)
            spider.log(str(item._errors), logging.ERROR)
            raise DropItem("Missing attribute.")
    else:
        if not item.is_valid():
            spider.log(str(item._errors), logging.ERROR)
            raise DropItem("Missing attribute.")
    return item
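For context, a pipeline like the one above only runs if it is registered in the Scrapy project settings. A minimal sketch; the module path and the priority value are assumptions, not taken from the example:

# settings.py (hypothetical module path; 300 is an arbitrary priority)
ITEM_PIPELINES = {
    'races_project.pipelines.RacesPipeline': 300,
}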
Example 3: process_item
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def process_item(self, item, spider):
    try:
        item['news_website'] = spider.ref_object
        item['search_term'] = spider.search_terms  # added so we can see what was searched for per item
        checker_rt = SchedulerRuntime(runtime_type='C')
        checker_rt.save()
        item['checker_runtime'] = checker_rt
        item.save()
        # Earlier experiments with tagging articles by Den, kept for reference:
        # p1 = Den.objects.get(title='baby')
        # a1 = Article(search_term=spider.search_terms)
        # a1.dens.add(p1)
        # busi = item.save(commit=False)
        # p1 = Den.objects.get(title='pretty')
        # busi.dens.add(p1)
        # p1 = Den(title='pretty')
        # a1 = Article(search_term=spider.search_terms)
        # a1.dens.add(p1)
        spider.action_successful = True
        spider.log("Item saved.", log.INFO)
    except IntegrityError as e:
        spider.log(str(e), log.ERROR)
        raise DropItem("Missing attribute.")
Example 4: process_item
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def process_item(self, item, spider):
    if isinstance(spider, ProductSpider):
        # spider.log("spider: " + spider.name)
        spider.log("item time is: " + item['time'])
        item['time'] = process_date(item['time'])
        # TODO:
        # drop item if price is null
        # drop item if time > now
        try:
            # if (item == ArticleItem):
            #     item['news_website'] = spider.ref_object
            # else:
            item['source'] = spider.ref_object
            checker_rt = SchedulerRuntime(runtime_type='C')
            checker_rt.save()
            item['checker_runtime'] = checker_rt
            item.save()
            spider.action_successful = True
            spider.log("Item saved.", log.INFO)
        except IntegrityError as e:
            spider.log(str(e), log.ERROR)
            raise DropItem("Missing attribute.")
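The TODO comments in this example are left unimplemented. A hedged sketch of what the two drops might look like, assuming the "no" in the original comment means "now", that item['time'] holds a datetime after process_date, and that a missing price should abort the item:

from datetime import datetime
from scrapy.exceptions import DropItem

# Hypothetical implementations of the two TODOs above.
if not item.get('price'):
    raise DropItem("Price is null.")
if item['time'] > datetime.now():  # assumes process_date returned a datetime
    raise DropItem("Item time lies in the future.")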
Example 5: process_item
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def process_item(self, item, spider):
    try:
        checker_rt = SchedulerRuntime(runtime_type='C')
        checker_rt.save()
        item['checker_runtime'] = checker_rt
        item['source'] = spider.ref_object
        try:
            item_model = item_to_model(item)
        except TypeError:
            return item
        model, created = get_or_create(item_model)
        update_model(model, item_model)
        if created:
            spider.log('==' + model.name + '== created.', log.INFO)
        else:
            spider.log('==' + model.name + '== updated.', log.INFO)
        spider.action_successful = True
    except IntegrityError as e:
        spider.log(str(e), log.ERROR)
        raise DropItem("Missing attribute.")
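The helpers item_to_model, get_or_create and update_model are not shown in this example. One plausible shape for them, inferred from how they are called above; the natural-key lookup on name is an assumption, chosen only because the pipeline logs model.name:

def item_to_model(item):
    # DjangoItem exposes its model class via `django_model`; raising
    # TypeError here feeds the `except TypeError` branch in the pipeline.
    model_class = getattr(item, 'django_model', None)
    if model_class is None:
        raise TypeError("item is not a DjangoItem and cannot be converted")
    return model_class(**dict(item))

def get_or_create(model):
    # Look up an existing row by a natural key; `name` is an assumption.
    model_class = type(model)
    try:
        return model_class.objects.get(name=model.name), False
    except model_class.DoesNotExist:
        return model, True

def update_model(destination, source):
    # Copy concrete field values from the freshly scraped instance onto the
    # (possibly pre-existing) destination row and persist it.
    for field in source._meta.fields:
        if field.name != 'id':
            setattr(destination, field.name, getattr(source, field.name))
    destination.save()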
Example 6: test_double
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def test_double(self):
    checker_rt = SchedulerRuntime()
    checker_rt.save()
    event = Event(title=u'Event 1', url=u'http://localhost:8010/static/site_generic/event1.html',
        checker_runtime=checker_rt)
    event.save()
    self.run_event_spider(1)
    self.assertEqual(len(Event.objects.all()), 4)
    self.assertEqual(len(Event.objects.filter(title='Event 1')), 1)
Example 7: add_listing_checker
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def add_listing_checker(listing):
    listing_source_cfg = ListingSourceScraperConfig.objects.get(pk=listing.listing_source_id)
    checker_rt = SchedulerRuntime(runtime_type='C', next_action_time=timezone.now() + timedelta(days=1))
    checker_rt.save()
    checker_config = ListingCheckerConfig(listing=listing, checker_runtime=checker_rt, scraper=listing_source_cfg.scraper)
    checker_config.save()
    return checker_config
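A short usage sketch for the helper above: once a listing has been scraped, attach a checker so the scheduler revisits it roughly a day later. The Listing lookup is hypothetical:

# Hypothetical caller; Listing and pk=42 are illustrative only.
listing = Listing.objects.get(pk=42)
checker_config = add_listing_checker(listing)
print(checker_config.checker_runtime.next_action_time)  # about 24 hours from now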
Example 8: test_detail_page_url_id_field
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def test_detail_page_url_id_field(self):
    checker_rt = SchedulerRuntime()
    checker_rt.save()
    event = Event(title=u'Event 1', event_website=self.event_website,
        url=u'http://localhost:8010/static/site_generic/event5.html',
        checker_runtime=checker_rt)
    event.save()
    self.run_event_spider(1)
    self.assertEqual(len(Event.objects.all()), 5)
    self.assertEqual(Event.objects.filter(title='Event 1').count(), 2)
Example 9: test_single_standard_id_field
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def test_single_standard_id_field(self):
    checker_rt = SchedulerRuntime()
    checker_rt.save()
    event = Event(title='Event 1', event_website=self.event_website,
        url='http://localhost:8010/static/site_generic/event5.html',
        checker_runtime=checker_rt)
    event.save()
    self.soa_url.id_field = False
    self.soa_url.save()
    self.soa_title.id_field = True
    self.soa_title.save()
    self.run_event_spider(1)
    self.assertEqual(len(Event.objects.all()), 4)
    self.assertEqual(Event.objects.filter(title='Event 1').count(), 1)
Example 10: extraSetUpHTMLChecker
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def extraSetUpHTMLChecker(self):
    self.scraper.checker_type = 'X'
    self.scraper.checker_x_path = u'//div[@class="event_not_found"]/div/text()'
    self.scraper.checker_x_path_result = u'Event not found!'
    self.scraper.checker_ref_url = u'http://localhost:8010/static/site_with_json_content_type/event_not_found.html'
    self.scraper.save()
    scheduler_rt = SchedulerRuntime()
    scheduler_rt.save()
    self.event = Event(title='Event 1', event_website=self.event_website,
        description='Event 1 description',
        url='http://localhost:8010/static/site_with_json_content_type/event_not_found.html',
        checker_runtime=scheduler_rt)
    self.event.save()
Example 11: setUp
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def setUp(self):
    super(CheckerRunTest, self).setUp()
    self.scraper.checker_x_path = u'//div[@class="event_not_found"]/div/text()'
    self.scraper.checker_x_path_result = u'Event was deleted!'
    self.scraper.save()
    scheduler_rt = SchedulerRuntime()
    scheduler_rt.save()
    self.event = Event(title='Event 1', event_website=self.event_website,
        description='Event 1 description',
        url='http://localhost:8010/static/site_for_checker/event1.html',
        checker_runtime=scheduler_rt)
    self.event.save()
Example 12: test_standard_update_field_update
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def test_standard_update_field_update(self):
    checker_rt = SchedulerRuntime()
    checker_rt.save()
    event = Event(title=u'Event 1 - Old Title', event_website=self.event_website,
        url=u'http://localhost:8010/static/site_generic/event1.html',
        checker_runtime=checker_rt)
    event.save()
    self.soa_title.attr_type = 'T'
    self.soa_title.save()
    self.run_event_spider(1)
    event_updated = Event.objects.get(pk=event.id)
    self.assertEqual(event_updated.title, 'Event 1')
    self.assertEqual(len(Event.objects.filter(title='Event 1 - Old Title')), 0)
Example 13: process_item
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def process_item(self, item, spider):
    try:
        item['news_website'] = spider.ref_object
        checker_rt = SchedulerRuntime(runtime_type='C')
        checker_rt.save()
        item['checker_runtime'] = checker_rt
        item.save()
        spider.action_successful = True
        spider.log("Item saved.", log.INFO)
    except IntegrityError as e:
        spider.log(str(e), log.ERROR)
        raise DropItem("Missing attribute.")
Example 14: process_item
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def process_item(self, item, spider):
    if spider.conf['DO_ACTION']:
        try:
            item['job_website'] = spider.ref_object
            checker_rt = SchedulerRuntime(runtime_type='C')
            checker_rt.save()
            item['checker_runtime'] = checker_rt
            item.save()
            spider.action_successful = True
            spider.log("Items saved in the DB", logging.INFO)
        except IntegrityError as e:
            spider.log(str(e), logging.ERROR)
            raise DropItem("missing attrib")
Example 15: setUpScraperJSChecker
# Required import: from dynamic_scraper.models import SchedulerRuntime [as alias]
# Or: from dynamic_scraper.models.SchedulerRuntime import save [as alias]
def setUpScraperJSChecker(self, path):
    super(ScraperJSRunTest, self).setUp()
    self.scraper.checker_type = 'X'
    self.scraper.checker_x_path = u'//div[@class="event_not_found"]/div/text()'
    self.scraper.checker_ref_url = u'%ssite_with_js/event_not_found.html' % path
    self.scraper.save()
    scheduler_rt = SchedulerRuntime()
    scheduler_rt.save()
    self.event = Event(title='Event 1', event_website=self.event_website,
        description='Event 1 description',
        url='%ssite_with_js/event_not_found.html' % path,
        checker_runtime=scheduler_rt)
    self.event.save()