This article collects typical usage examples of the Python method models.DBSession.rollback. If you are wondering exactly what DBSession.rollback does, how to call it, or what real usage looks like, the hand-picked code samples below should help. You can also explore further usage examples of the class the method belongs to, models.DBSession.
Shown below are 3 code examples of the DBSession.rollback method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
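Before the examples, here is a minimal sketch of the pattern all three share: attempt a commit, and if it raises, call rollback() so the session is usable again. The engine URL and the User model are hypothetical placeholders, not taken from the examples below.

# Minimal sketch of the commit/rollback pattern, assuming DBSession is a
# SQLAlchemy scoped_session as in the examples below; the engine URL and
# the User model are hypothetical placeholders.
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError
from models import DBSession, User  # User is a placeholder model

engine = create_engine('sqlite:///example.db')
DBSession.configure(bind=engine)

DBSession.add(User(name='alice'))
try:
    DBSession.commit()
except IntegrityError:
    # After a failed flush the session refuses further work until it is
    # rolled back; rollback() resets it.
    DBSession.rollback()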
Example 1: License
# Required import: from models import DBSession [as alias]
# Or: from models.DBSession import rollback [as alias]
#......... beginning of the code omitted .........
            email=data.get('author_email'))
        session.add(a)
    a = Author.query.filter_by(name=data['author']).one()
    r.author = a
    if 'license' in data:
        query = License.query.filter_by(name=data['license'])
        if query.count() == 0:
            l = License(name=data['license'])
            session.add(l)
        l = License.query.filter_by(name=data['license']).one()
        r.license = l
    session.add(r)
    session.commit()

if __name__ == '__main__':
    print("Initializing Smarmy...")
    engine = create_engine('sqlite:///smarmy.db')
    initialize_sql(engine)
    try:
        populate()
        print("Complete!")
    except IntegrityError as e:
        print("Got an Integrity Error:", str(e))
        DBSession.rollback()
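A side note on the lookup pattern in Example 1: it runs filter_by(...).count() and then a second filter_by(...).one() for each Author and License. A get-or-create helper, sketched below against the same models and session (the helper itself is hypothetical, not part of the example), does the same work in a single lookup.

def get_or_create(session, model, **kwargs):
    # Return the existing row matching kwargs, or stage a new one.
    instance = model.query.filter_by(**kwargs).first()
    if instance is None:
        instance = model(**kwargs)
        session.add(instance)
    return instance

# Usage in place of the count()/one() pair above:
# r.license = get_or_create(session, License, name=data['license'])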
Example 2: _initdb
# Required import: from models import DBSession [as alias]
# Or: from models.DBSession import rollback [as alias]
def _initdb(csvfilename, drop_db=False):
    session = DBSession()
    from models import Name, Addrobj
    # if drop_db:
    #     Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    reader = csv.reader(open(csvfilename))
    # next(reader, None)  # skip the headers
    i = 0
    for line in reader:
        line = [s.replace("'", "''") for s in line]
        (actstatus, aoguid, aoid, aolevel, areacode, autocode, centstatus,
         citycode, code, currstatus, enddate, formalname, ifnsfl, ifnsul,
         nextid, offname, okato, oktmo, operstatus, parentguid, placecode,
         plaincode, postalcode, previd, regioncode, shortname, startdate,
         streetcode, terrifnsfl, terrifnsul, updatedate, ctarcode, extrcode,
         sextcode, livestatus, normdoc) = line
        try:
            name = Name(
                name=formalname,
                name_tsvect=formalname,
                name_tsquery=formalname)
            ts_query1 = name.name_tsquery
            session.add(name)
            session.commit()
        except Exception:
            # A failed commit (e.g. a duplicate name) leaves the session in an
            # unusable state; roll back before continuing with the next insert.
            session.rollback()
        try:
            name = Name(
                name=offname,
                name_tsvect=offname,
                name_tsquery=offname)
            ts_query2 = name.name_tsquery
            session.add(name)
            session.commit()
        except Exception:
            session.rollback()
        # Different names can map to the same name_tsquery, so we search by
        # name_tsquery to avoid storing near-duplicates in the DB.
        names = session.query(Name).filter(Name.name_tsquery.in_([
            ts_query1, ts_query2
        ]))
        names = names.all()
        place = Addrobj(
            actstatus=actstatus,
            aoguid=aoguid,
            aoid=aoid,
            aolevel=aolevel,
            areacode=areacode,
            autocode=autocode,
            centstatus=centstatus,
            citycode=citycode,
            code=code,
            currstatus=currstatus,
            formalname=formalname,
            ifnsfl=ifnsfl,
            ifnsul=ifnsul,
            nextid=nextid,
            offname=offname,
            operstatus=operstatus,
#......... remainder of the code omitted .........
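Example 2 tolerates duplicate names by committing per insert and rolling the whole session back on any failure. An alternative sketch, using SQLAlchemy's begin_nested() savepoint (a variation of mine, not taken from the example), confines the rollback to the one failed insert:

from sqlalchemy.exc import IntegrityError

try:
    # SAVEPOINT: only this nested transaction is rolled back on failure.
    with session.begin_nested():
        session.add(Name(name=formalname,
                         name_tsvect=formalname,
                         name_tsquery=formalname))
except IntegrityError:
    pass  # duplicate row: the savepoint already rolled back
session.commit()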
Example 3: update_links
# Required import: from models import DBSession [as alias]
# Or: from models.DBSession import rollback [as alias]
import random
import time
from datetime import datetime

import feedparser
from dateutil import parser as date_parser  # assumed source of date_parser
from sqlalchemy.exc import IntegrityError


def update_links(rss_feed_url):
    """Insert new announcements into the database.

    Notes:
    * The publishing date is the date the property was listed for sale; it may be very old.
    * Every entry in the RSS feed is inserted into the database. The url field is unique, so duplicates are rejected.
    * When querying for new entries, filter on (timestamp = 'today' and pubDate = 'close enough') so that only newly listed properties are returned.

    Parameters
    ----------
    rss_feed_url : str
    """
    feed = feedparser.parse(rss_feed_url)
    entries = feed['entries']
    num_new_links = 0
    print('Updating links database ..')
    session = DBSession()
    url_rs = session.query(Link.url)
    url_list = [url for (url,) in url_rs]
    session.close()
    session = DBSession()
    browser = logged_in_browser()
    for cnt, entry in enumerate(entries):
        link = entry['link']
        published_str = entry['published']
        print(cnt, ':', link)
        if link in url_list:
            print('duplicate url, passing..')
            continue
        published = date_parser.parse(published_str)
        pubDate = datetime.fromordinal(published.toordinal())
        new_link = Link(url=link, date=pubDate)
        session.add(new_link)
        time.sleep(random.choice(range(20, 60)) / 10)
        try:
            data = crawl_hemnet_page(new_link.url, browser=browser)
        except Exception as e:
            print('Error crawling hemnet page.', e)
            continue
        new_apt = Apartment(**data)
        new_apt.link = new_link
        session.add(new_apt)
        try:
            session.commit()
            num_new_links += 1
        except IntegrityError as e:
            print(e)
            print(link)
            session.rollback()
        except Exception as e:
            print(e)
            session.rollback()
        finally:
            session = DBSession()
    print('Done!')
    print('%s new links added.' % num_new_links)
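Finally, a sketch of the "new entries" query that update_links' docstring describes. Link.date is the pubDate column set above; Link.timestamp (the row's insertion time) is a hypothetical column name, since the snippet does not show the Link model:

from datetime import datetime, timedelta

session = DBSession()
today = datetime.now().date()
new_links = (
    session.query(Link)
    .filter(Link.timestamp >= today)  # hypothetical column: inserted today
    .filter(Link.date >= datetime.now() - timedelta(days=7))  # listed recently
    .all()
)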