diff --git a/gui/slick/interfaces/default/config.tmpl b/gui/slick/interfaces/default/config.tmpl
index 26b462e9..23d9b4ce 100644
--- a/gui/slick/interfaces/default/config.tmpl
+++ b/gui/slick/interfaces/default/config.tmpl
@@ -29,7 +29,7 @@
$sg_str('CONFIG_FILE') |
- $db.dbFilename() |
+ $db.db_filename() |
#if $db.db_supports_backup
$backup_db_path |
#end if
diff --git a/lib/sg_helpers.py b/lib/sg_helpers.py
index 18ef9cc6..41f99ef9 100644
--- a/lib/sg_helpers.py
+++ b/lib/sg_helpers.py
@@ -159,7 +159,7 @@ class ConnectionFailDict(object):
if None is not db:
with self.lock:
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('connection_fails'):
+ if my_db.has_table('connection_fails'):
domains = my_db.select('SELECT DISTINCT domain_url from connection_fails')
for domain in domains:
self.domain_list[domain['domain_url']] = ConnectionFailList(domain['domain_url'])
@@ -515,7 +515,7 @@ class ConnectionFailList(object):
def _load_fail_values(self):
if None is not DATA_DIR:
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('connection_fails_count'):
+ if my_db.has_table('connection_fails_count'):
r = my_db.select('SELECT * FROM connection_fails_count WHERE domain_url = ?', [self.url])
if r:
self._failure_count = try_int(r[0]['failure_count'], 0)
@@ -536,7 +536,7 @@ class ConnectionFailList(object):
def _save_fail_value(self, field, value):
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('connection_fails_count'):
+ if my_db.has_table('connection_fails_count'):
r = my_db.action('UPDATE connection_fails_count SET %s = ? WHERE domain_url = ?' % field,
[value, self.url])
if 0 == r.rowcount:
@@ -568,7 +568,7 @@ class ConnectionFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('connection_fails'):
+ if my_db.has_table('connection_fails'):
results = my_db.select('SELECT * FROM connection_fails WHERE domain_url = ?', [self.url])
self._fails = []
for r in results:
@@ -586,7 +586,7 @@ class ConnectionFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('connection_fails'):
+ if my_db.has_table('connection_fails'):
# noinspection PyCallByClass,PyTypeChecker
time_limit = _totimestamp(datetime.datetime.now() - datetime.timedelta(days=28))
my_db.action('DELETE FROM connection_fails WHERE fail_time < ?', [time_limit])
diff --git a/sickgear.py b/sickgear.py
index 9959b19b..d65b14b6 100755
--- a/sickgear.py
+++ b/sickgear.py
@@ -428,7 +428,7 @@ class SickGear(object):
('sickbeard.db', sickgear.mainDB.MIN_DB_VERSION, sickgear.mainDB.MAX_DB_VERSION,
sickgear.mainDB.TEST_BASE_VERSION, 'MainDb')
]:
- cur_db_version = db.DBConnection(d).checkDBVersion()
+ cur_db_version = db.DBConnection(d).check_db_version()
# handling of standalone TEST db versions
load_msg = 'Downgrading %s to production version' % d
@@ -437,7 +437,7 @@ class SickGear(object):
print('Your [%s] database version (%s) is a test db version and doesn\'t match SickGear required '
'version (%s), downgrading to production db' % (d, cur_db_version, max_v))
self.execute_rollback(mo, max_v, load_msg)
- cur_db_version = db.DBConnection(d).checkDBVersion()
+ cur_db_version = db.DBConnection(d).check_db_version()
if 100000 <= cur_db_version:
print(u'Rollback to production failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
@@ -452,7 +452,7 @@ class SickGear(object):
print('Your [%s] database version (%s) is a db version and doesn\'t match SickGear required '
'version (%s), downgrading to production base db' % (d, cur_db_version, max_v))
self.execute_rollback(mo, base_v, load_msg)
- cur_db_version = db.DBConnection(d).checkDBVersion()
+ cur_db_version = db.DBConnection(d).check_db_version()
if 100000 <= cur_db_version:
print(u'Rollback to production base failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
@@ -474,7 +474,7 @@ class SickGear(object):
u' what this version of SickGear supports. Trying to rollback now. Please wait...' %
(d, cur_db_version))
self.execute_rollback(mo, max_v, load_msg)
- if db.DBConnection(d).checkDBVersion() > max_v:
+ if db.DBConnection(d).check_db_version() > max_v:
print(u'Rollback failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
print(u'Rollback of [%s] successful.' % d)
diff --git a/sickgear/__init__.py b/sickgear/__init__.py
index 0bf86253..3d7be274 100644
--- a/sickgear/__init__.py
+++ b/sickgear/__init__.py
@@ -1539,19 +1539,19 @@ def init_stage_2():
# initialize main database
my_db = db.DBConnection()
- db.MigrationCode(my_db)
+ db.migration_code(my_db)
# initialize the cache database
my_db = db.DBConnection('cache.db')
- db.upgradeDatabase(my_db, cache_db.InitialSchema)
+ db.upgrade_database(my_db, cache_db.InitialSchema)
# initialize the failed downloads database
my_db = db.DBConnection('failed.db')
- db.upgradeDatabase(my_db, failed_db.InitialSchema)
+ db.upgrade_database(my_db, failed_db.InitialSchema)
# fix up any db problems
my_db = db.DBConnection()
- db.sanityCheckDatabase(my_db, mainDB.MainSanityCheck)
+ db.sanity_check_db(my_db, mainDB.MainSanityCheck)
# initialize metadata_providers
metadata_provider_dict = metadata.get_metadata_generator_dict()
diff --git a/sickgear/databases/cache_db.py b/sickgear/databases/cache_db.py
index 87e7ea98..2332af24 100644
--- a/sickgear/databases/cache_db.py
+++ b/sickgear/databases/cache_db.py
@@ -96,16 +96,16 @@ class InitialSchema(db.SchemaUpgrade):
])
def test(self):
- return self.hasTable('lastUpdate')
+ return self.has_table('lastUpdate')
def execute(self):
self.do_query(self.queries[next(iter(self.queries))])
- self.setDBVersion(MIN_DB_VERSION, check_db_version=False)
+ self.set_db_version(MIN_DB_VERSION, check_db_version=False)
class ConsolidateProviders(InitialSchema):
def test(self):
- return 1 < self.checkDBVersion()
+ return 1 < self.call_check_db_version()
def execute(self):
keep_tables = {'lastUpdate', 'lastSearch', 'db_version',
@@ -113,13 +113,13 @@ class ConsolidateProviders(InitialSchema):
# old provider_cache is dropped before re-creation
# noinspection SqlResolve
self.do_query(['DROP TABLE [provider_cache]'] + self.queries['consolidate_providers'] +
- ['DROP TABLE [%s]' % t for t in (set(self.listTables()) - keep_tables)])
+ ['DROP TABLE [%s]' % t for t in (set(self.list_tables()) - keep_tables)])
self.finish(True)
class AddBacklogParts(ConsolidateProviders):
def test(self):
- return 2 < self.checkDBVersion()
+ return 2 < self.call_check_db_version()
def execute(self):
# noinspection SqlResolve
@@ -130,7 +130,7 @@ class AddBacklogParts(ConsolidateProviders):
class AddProviderFailureHandling(AddBacklogParts):
def test(self):
- return 3 < self.checkDBVersion()
+ return 3 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['add_provider_fails'])
@@ -139,17 +139,17 @@ class AddProviderFailureHandling(AddBacklogParts):
class AddIndexerToTables(AddProviderFailureHandling):
def test(self):
- return 4 < self.checkDBVersion()
+ return 4 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['add_indexer_to_tables'])
- self.addColumn('provider_cache', 'indexer', 'NUMERIC')
+ self.add_column('provider_cache', 'indexer', 'NUMERIC')
self.finish()
class AddGenericFailureHandling(AddBacklogParts):
def test(self):
- return 5 < self.checkDBVersion()
+ return 5 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['connection_fails'])
@@ -158,7 +158,7 @@ class AddGenericFailureHandling(AddBacklogParts):
class AddSaveQueues(AddGenericFailureHandling):
def test(self):
- return 6 < self.checkDBVersion()
+ return 6 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['save_queues'])
diff --git a/sickgear/databases/failed_db.py b/sickgear/databases/failed_db.py
index 03f66c0a..60d760a8 100644
--- a/sickgear/databases/failed_db.py
+++ b/sickgear/databases/failed_db.py
@@ -28,7 +28,7 @@ TEST_BASE_VERSION = None # the base production db version, only needed for TEST
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
def test(self):
- return self.hasTable('failed')
+ return self.has_table('failed')
def execute(self):
queries = [
@@ -45,18 +45,18 @@ class InitialSchema(db.SchemaUpgrade):
class SizeAndProvider(InitialSchema):
def test(self):
- return self.hasColumn('failed', 'size') and self.hasColumn('failed', 'provider')
+ return self.has_column('failed', 'size') and self.has_column('failed', 'provider')
def execute(self):
- self.addColumn('failed', 'size')
- self.addColumn('failed', 'provider', 'TEXT', '')
+ self.add_column('failed', 'size')
+ self.add_column('failed', 'provider', 'TEXT', '')
class History(SizeAndProvider):
"""Snatch history that can't be modified by the user"""
def test(self):
- return self.hasTable('history')
+ return self.has_table('history')
def execute(self):
self.connection.action('CREATE TABLE history (date NUMERIC, ' +
@@ -67,21 +67,21 @@ class HistoryStatus(History):
"""Store episode status before snatch to revert to if necessary"""
def test(self):
- return self.hasColumn('history', 'old_status')
+ return self.has_column('history', 'old_status')
def execute(self):
- self.addColumn('history', 'old_status', 'NUMERIC', Quality.NONE)
- self.addColumn('history', 'showid', 'NUMERIC', '-1')
- self.addColumn('history', 'season', 'NUMERIC', '-1')
- self.addColumn('history', 'episode', 'NUMERIC', '-1')
+ self.add_column('history', 'old_status', 'NUMERIC', Quality.NONE)
+ self.add_column('history', 'showid', 'NUMERIC', '-1')
+ self.add_column('history', 'season', 'NUMERIC', '-1')
+ self.add_column('history', 'episode', 'NUMERIC', '-1')
class AddIndexerToTables(HistoryStatus):
def test(self):
- return self.hasColumn('history', 'indexer')
+ return self.has_column('history', 'indexer')
def execute(self):
- self.addColumn('history', 'indexer', 'NUMERIC')
+ self.add_column('history', 'indexer', 'NUMERIC')
main_db = db.DBConnection('sickbeard.db')
show_ids = {s['prod_id']: s['tv_id'] for s in
@@ -91,15 +91,15 @@ class AddIndexerToTables(HistoryStatus):
cl.append(['UPDATE history SET indexer = ? WHERE showid = ?', [i, s_id]])
self.connection.mass_action(cl)
- if self.connection.hasTable('backup_history'):
+ if self.connection.has_table('backup_history'):
self.connection.action(
'REPLACE INTO history '
'(date, size, `release`, provider, old_status, showid, season, episode, indexer)'
' SELECT'
' date, size, `release`, provider, old_status, showid, season, episode, indexer'
' FROM backup_history')
- self.connection.removeTable('backup_history')
+ self.connection.remove_table('backup_history')
self.connection.action('VACUUM')
- self.setDBVersion(2, check_db_version=False)
+ self.set_db_version(2, check_db_version=False)
diff --git a/sickgear/databases/mainDB.py b/sickgear/databases/mainDB.py
index 249751c4..c51e3108 100644
--- a/sickgear/databases/mainDB.py
+++ b/sickgear/databases/mainDB.py
@@ -103,7 +103,7 @@ class MainSanityCheck(db.DBSanityCheck):
# This func would break with multi tv info sources and without tvid, so added check min db version to mitigate
# Also, tv_show table had a unique index added at some time to prevent further dupes,
# therefore, this func is kept to cleanse legacy data given that it's redundant for new row insertions
- if self.connection.checkDBVersion() < 20004:
+ if self.connection.check_db_version() < 20004:
sql_result = self.connection.select(
'SELECT show_id, %(col)s, COUNT(%(col)s) AS count FROM tv_shows GROUP BY %(col)s HAVING count > 1'
@@ -136,7 +136,7 @@ class MainSanityCheck(db.DBSanityCheck):
# This func would break with multi tv info sources and without tvid, so added check min db version to mitigate
# Also, tv_show table had a unique index added at some time to prevent further dupes,
# therefore, this func is kept to cleanse legacy data given that it's redundant for new row insertions
- if self.connection.checkDBVersion() < 20007:
+ if self.connection.check_db_version() < 20007:
sql_result = self.connection.select(
'SELECT indexer AS tv_id, showid AS prod_id, season, episode, COUNT(showid) as count'
@@ -215,18 +215,18 @@ class MainSanityCheck(db.DBSanityCheck):
logger.log('Updating TV Episode table with index idx_sta_epi_sta_air')
self.connection.action('CREATE INDEX idx_sta_epi_sta_air ON tv_episodes (season, episode, status, airdate)')
- if not self.connection.hasIndex('tv_episodes', 'idx_tv_ep_ids'):
+ if not self.connection.has_index('tv_episodes', 'idx_tv_ep_ids'):
logger.log('Updating TV Episode table with index idx_tv_ep_ids')
self.connection.action('CREATE INDEX idx_tv_ep_ids ON tv_episodes (indexer, showid)')
- if not self.connection.hasIndex('tv_episodes', 'idx_tv_episodes_unique'):
+ if not self.connection.has_index('tv_episodes', 'idx_tv_episodes_unique'):
self.connection.action('CREATE UNIQUE INDEX idx_tv_episodes_unique ON '
'tv_episodes(indexer,showid,season,episode)')
- allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.hasTable('blocklist')]
+ allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.has_table('blocklist')]
for t in [('%slist' % allowtbl, 'show_id'), ('%slist' % blocktbl, 'show_id'),
('history', 'showid'), ('scene_exceptions', 'indexer_id')]:
- if not self.connection.hasIndex('%s' % t[0], 'idx_id_indexer_%s' % t[0]):
+ if not self.connection.has_index('%s' % t[0], 'idx_id_indexer_%s' % t[0]):
# noinspection SqlResolve
self.connection.action('CREATE INDEX idx_id_indexer_%s ON %s (indexer, %s)' % (t[0], t[0], t[1]))
@@ -309,9 +309,9 @@ class InitialSchema(db.SchemaUpgrade):
# Add new migrations at the bottom of the list; subclass the previous migration.
# 0 -> 20009
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if not self.hasTable('tv_shows') and not self.hasTable('db_version'):
+ if not self.has_table('tv_shows') and not self.has_table('db_version'):
queries = [
# anime allow and block list
'CREATE TABLE allowlist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)',
@@ -383,7 +383,7 @@ class InitialSchema(db.SchemaUpgrade):
self.connection.action(query)
else:
- cur_db_version = self.checkDBVersion()
+ cur_db_version = self.call_check_db_version()
if cur_db_version < MIN_DB_VERSION:
logger.log_error_and_exit(
@@ -403,7 +403,7 @@ class InitialSchema(db.SchemaUpgrade):
' your database may be unusable due to their modifications.'
)
- return self.checkDBVersion()
+ return self.call_check_db_version()
# 9 -> 10
@@ -413,13 +413,13 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade):
This func is only for 9->10 where older db columns exist,
those columns have since changed
"""
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if not self.hasColumn('tv_episodes', 'file_size'):
- self.addColumn('tv_episodes', 'file_size')
+ if not self.has_column('tv_episodes', 'file_size'):
+ self.add_column('tv_episodes', 'file_size')
- if not self.hasColumn('tv_episodes', 'release_name'):
- self.addColumn('tv_episodes', 'release_name', 'TEXT', '')
+ if not self.has_column('tv_episodes', 'release_name'):
+ self.add_column('tv_episodes', 'release_name', 'TEXT', '')
sql_result = self.connection.select('SELECT episode_id, location, file_size FROM tv_episodes')
@@ -528,14 +528,14 @@ class AddSizeAndSceneNameFields(db.SchemaUpgrade):
self.connection.action('UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?',
[ep_file_name, cur_result['episode_id']])
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 10 -> 11
class RenameSeasonFolders(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
# rename the column
self.connection.action('ALTER TABLE tv_shows RENAME TO tmp_tv_shows')
@@ -558,8 +558,8 @@ class RenameSeasonFolders(db.SchemaUpgrade):
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_tv_shows')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 11 -> 12
@@ -628,7 +628,7 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade):
return result
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
# update the default quality so we dont grab the wrong qualities after migration
sickgear.QUALITY_DEFAULT = self._update_composite_qualities(sickgear.QUALITY_DEFAULT)
@@ -697,12 +697,12 @@ class Add1080pAndRawHDQualities(db.SchemaUpgrade):
[self._update_quality(cur_entry['quality']), cur_entry['showid'], cur_entry['date']]])
self.connection.mass_action(cl)
- self.incDBVersion()
+ self.inc_db_version()
# cleanup and reduce db if any previous data was removed
self.upgrade_log(u'Performing a vacuum on the database.', logger.DEBUG)
self.connection.action('VACUUM')
- return self.checkDBVersion()
+ return self.call_check_db_version()
# 12 -> 13
@@ -710,20 +710,20 @@ class AddShowidTvdbidIndex(db.SchemaUpgrade):
# Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Checking for duplicate shows before adding unique index.')
MainSanityCheck(self.connection).fix_duplicate_shows('tvdb_id')
self.upgrade_log(u'Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.')
- if not self.hasTable('idx_showid'):
+ if not self.has_table('idx_showid'):
self.connection.action('CREATE INDEX idx_showid ON tv_episodes (showid);')
- if not self.hasTable('idx_tvdb_id'):
+ if not self.has_table('idx_tvdb_id'):
# noinspection SqlResolve
self.connection.action('CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 13 -> 14
@@ -731,23 +731,23 @@ class AddLastUpdateTVDB(db.SchemaUpgrade):
# Adding column last_update_tvdb to tv_shows for controlling nightly updates
def execute(self):
- if not self.hasColumn('tv_shows', 'last_update_tvdb'):
+ if not self.has_column('tv_shows', 'last_update_tvdb'):
self.upgrade_log(u'Adding column last_update_tvdb to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'last_update_tvdb', default=1)
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'last_update_tvdb', default=1)
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 14 -> 15
class AddDBIncreaseTo15(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- self.upgrade_log(u'Bumping database version to v%s' % self.checkDBVersion())
- self.incDBVersion()
- return self.checkDBVersion()
+ self.upgrade_log(u'Bumping database version to v%s' % self.call_check_db_version())
+ self.inc_db_version()
+ return self.call_check_db_version()
# 15 -> 16
@@ -755,121 +755,121 @@ class AddIMDbInfo(db.SchemaUpgrade):
def execute(self):
db_backed_up = False
- if not self.hasTable('imdb_info'):
+ if not self.has_table('imdb_info'):
self.upgrade_log(u'Creating IMDb table imdb_info')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
db_backed_up = True
self.connection.action(
'CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC,'
' akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT,'
' rating TEXT, votes INTEGER, last_update NUMERIC)')
- if not self.hasColumn('tv_shows', 'imdb_id'):
+ if not self.has_column('tv_shows', 'imdb_id'):
self.upgrade_log(u'Adding IMDb column imdb_id to tv_shows')
if not db_backed_up:
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'imdb_id')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'imdb_id')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 16 -> 17
class AddProperNamingSupport(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'imdb_id')\
- and self.hasColumn('tv_shows', 'rls_require_words')\
- and self.hasColumn('tv_shows', 'rls_ignore_words'):
- return self.setDBVersion(5816)
+ if not self.has_column('tv_shows', 'imdb_id')\
+ and self.has_column('tv_shows', 'rls_require_words')\
+ and self.has_column('tv_shows', 'rls_ignore_words'):
+ return self.set_db_version(5816)
- if not self.hasColumn('tv_episodes', 'is_proper'):
+ if not self.has_column('tv_episodes', 'is_proper'):
self.upgrade_log(u'Adding column is_proper to tv_episodes')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_episodes', 'is_proper')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_episodes', 'is_proper')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 17 -> 18
class AddEmailSubscriptionTable(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_episodes', 'is_proper')\
- and self.hasColumn('tv_shows', 'rls_require_words')\
- and self.hasColumn('tv_shows', 'rls_ignore_words')\
- and self.hasColumn('tv_shows', 'skip_notices'):
- return self.setDBVersion(5817)
+ if not self.has_column('tv_episodes', 'is_proper')\
+ and self.has_column('tv_shows', 'rls_require_words')\
+ and self.has_column('tv_shows', 'rls_ignore_words')\
+ and self.has_column('tv_shows', 'skip_notices'):
+ return self.set_db_version(5817)
- if not self.hasColumn('tv_shows', 'notify_list'):
+ if not self.has_column('tv_shows', 'notify_list'):
self.upgrade_log(u'Adding column notify_list to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'notify_list', 'TEXT', None)
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'notify_list', 'TEXT', None)
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 18 -> 19
class AddProperSearch(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_episodes', 'is_proper'):
- return self.setDBVersion(12)
+ if not self.has_column('tv_episodes', 'is_proper'):
+ return self.set_db_version(12)
- if not self.hasColumn('tv_shows', 'notify_list')\
- and self.hasColumn('tv_shows', 'rls_require_words')\
- and self.hasColumn('tv_shows', 'rls_ignore_words')\
- and self.hasColumn('tv_shows', 'skip_notices')\
- and self.hasColumn('history', 'source'):
- return self.setDBVersion(5818)
+ if not self.has_column('tv_shows', 'notify_list')\
+ and self.has_column('tv_shows', 'rls_require_words')\
+ and self.has_column('tv_shows', 'rls_ignore_words')\
+ and self.has_column('tv_shows', 'skip_notices')\
+ and self.has_column('history', 'source'):
+ return self.set_db_version(5818)
- if not self.hasColumn('info', 'last_proper_search'):
+ if not self.has_column('info', 'last_proper_search'):
self.upgrade_log(u'Adding column last_proper_search to info')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('info', 'last_proper_search', default=1)
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('info', 'last_proper_search', default=1)
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 19 -> 20
class AddDvdOrderOption(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'dvdorder'):
+ if not self.has_column('tv_shows', 'dvdorder'):
self.upgrade_log(u'Adding column dvdorder to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'dvdorder', 'NUMERIC', '0')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'dvdorder', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 20 -> 21
class AddSubtitlesSupport(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'subtitles'):
+ if not self.has_column('tv_shows', 'subtitles'):
self.upgrade_log(u'Adding subtitles to tv_shows and tv_episodes')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'subtitles')
- self.addColumn('tv_episodes', 'subtitles', 'TEXT', '')
- self.addColumn('tv_episodes', 'subtitles_searchcount')
- self.addColumn('tv_episodes', 'subtitles_lastsearch', 'TIMESTAMP', str(datetime.datetime.min))
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'subtitles')
+ self.add_column('tv_episodes', 'subtitles', 'TEXT', '')
+ self.add_column('tv_episodes', 'subtitles_searchcount')
+ self.add_column('tv_episodes', 'subtitles_lastsearch', 'TIMESTAMP', str(datetime.datetime.min))
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 21 -> 22
class ConvertTVShowsToIndexerScheme(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Converting TV Shows table to Indexer Scheme...')
- if self.hasTable('tmp_tv_shows'):
+ if self.has_table('tmp_tv_shows'):
self.upgrade_log(u'Removing temp tv show tables left behind from previous updates...')
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_tv_shows')
@@ -899,18 +899,18 @@ class ConvertTVShowsToIndexerScheme(db.SchemaUpgrade):
# noinspection SqlConstantCondition
self.connection.action('UPDATE tv_shows SET indexer = 1 WHERE 1=1')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 22 -> 23
class ConvertTVEpisodesToIndexerScheme(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Converting TV Episodes table to Indexer Scheme...')
- if self.hasTable('tmp_tv_episodes'):
+ if self.has_table('tmp_tv_episodes'):
self.upgrade_log(u'Removing temp tv episode tables left behind from previous updates...')
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_tv_episodes')
@@ -940,18 +940,18 @@ class ConvertTVEpisodesToIndexerScheme(db.SchemaUpgrade):
# noinspection SqlConstantCondition
self.connection.action('UPDATE tv_episodes SET indexer = 1 WHERE 1=1')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 23 -> 24
class ConvertIMDBInfoToIndexerScheme(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Converting IMDb Info table to Indexer Scheme...')
- if self.hasTable('tmp_imdb_info'):
+ if self.has_table('tmp_imdb_info'):
self.upgrade_log(u'Removing temp imdb info tables left behind from previous updates...')
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_imdb_info')
@@ -969,18 +969,18 @@ class ConvertIMDBInfoToIndexerScheme(db.SchemaUpgrade):
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_imdb_info')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 24 -> 25
class ConvertInfoToIndexerScheme(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Converting Info table to Indexer Scheme...')
- if self.hasTable('tmp_info'):
+ if self.has_table('tmp_info'):
self.upgrade_log(u'Removing temp info tables left behind from previous updates...')
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_info')
@@ -995,29 +995,29 @@ class ConvertInfoToIndexerScheme(db.SchemaUpgrade):
# noinspection SqlResolve
self.connection.action('DROP TABLE tmp_info')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 25 -> 26
class AddArchiveFirstMatchOption(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if not self.hasColumn('tv_shows', 'archive_firstmatch'):
+ if not self.has_column('tv_shows', 'archive_firstmatch'):
self.upgrade_log(u'Adding column archive_firstmatch to tv_shows')
- self.addColumn('tv_shows', 'archive_firstmatch', 'NUMERIC', '0')
+ self.add_column('tv_shows', 'archive_firstmatch', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 26 -> 27
class AddSceneNumbering(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if self.hasTable('scene_numbering'):
+ if self.has_table('scene_numbering'):
self.connection.action('DROP TABLE scene_numbering')
self.upgrade_log(u'Upgrading table scene_numbering ...')
@@ -1026,14 +1026,14 @@ class AddSceneNumbering(db.SchemaUpgrade):
' scene_season INTEGER, scene_episode INTEGER,'
' PRIMARY KEY (indexer_id,season,episode))')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 27 -> 28
class ConvertIndexerToInteger(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
cl = []
self.upgrade_log(u'Converting Indexer to Integer ...')
@@ -1046,50 +1046,50 @@ class ConvertIndexerToInteger(db.SchemaUpgrade):
self.connection.mass_action(cl)
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 28 -> 29
class AddRequireAndIgnoreWords(db.SchemaUpgrade):
# Adding column rls_require_words and rls_ignore_words to tv_shows
def execute(self):
- if self.hasColumn('tv_shows', 'rls_require_words') and self.hasColumn('tv_shows', 'rls_ignore_words'):
- self.incDBVersion()
- return self.checkDBVersion()
+ if self.has_column('tv_shows', 'rls_require_words') and self.has_column('tv_shows', 'rls_ignore_words'):
+ self.inc_db_version()
+ return self.call_check_db_version()
db_backed_up = False
- if not self.hasColumn('tv_shows', 'rls_require_words'):
+ if not self.has_column('tv_shows', 'rls_require_words'):
self.upgrade_log(u'Adding column rls_require_words to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
db_backed_up = True
- self.addColumn('tv_shows', 'rls_require_words', 'TEXT', '')
+ self.add_column('tv_shows', 'rls_require_words', 'TEXT', '')
- if not self.hasColumn('tv_shows', 'rls_ignore_words'):
+ if not self.has_column('tv_shows', 'rls_ignore_words'):
self.upgrade_log(u'Adding column rls_ignore_words to tv_shows')
if not db_backed_up:
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'rls_ignore_words', 'TEXT', '')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'rls_ignore_words', 'TEXT', '')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 29 -> 30
class AddSportsOption(db.SchemaUpgrade):
def execute(self):
db_backed_up = False
- if not self.hasColumn('tv_shows', 'sports'):
+ if not self.has_column('tv_shows', 'sports'):
self.upgrade_log(u'Adding column sports to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
db_backed_up = True
- self.addColumn('tv_shows', 'sports', 'NUMERIC', '0')
+ self.add_column('tv_shows', 'sports', 'NUMERIC', '0')
- if self.hasColumn('tv_shows', 'air_by_date') and self.hasColumn('tv_shows', 'sports'):
+ if self.has_column('tv_shows', 'air_by_date') and self.has_column('tv_shows', 'sports'):
# update sports column
self.upgrade_log(u'[4/4] Updating tv_shows to reflect the correct sports value...')
if not db_backed_up:
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
cl = []
history_quality = self.connection.select(
'SELECT * FROM tv_shows WHERE LOWER(classification) = "sports" AND air_by_date = 1 AND sports = 0')
@@ -1099,117 +1099,117 @@ class AddSportsOption(db.SchemaUpgrade):
cl.append(['UPDATE tv_shows SET air_by_date = 0 WHERE show_id = ?', [cur_entry['show_id']]])
self.connection.mass_action(cl)
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 30 -> 31
class AddSceneNumberingToTvEpisodes(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding columns scene_season and scene_episode to tvepisodes')
- self.addColumn('tv_episodes', 'scene_season', 'NUMERIC', 'NULL')
- self.addColumn('tv_episodes', 'scene_episode', 'NUMERIC', 'NULL')
+ self.add_column('tv_episodes', 'scene_season', 'NUMERIC', 'NULL')
+ self.add_column('tv_episodes', 'scene_episode', 'NUMERIC', 'NULL')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 31 -> 32
class AddAnimeTVShow(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding column anime to tv_episodes')
- self.addColumn('tv_shows', 'anime', 'NUMERIC', '0')
+ self.add_column('tv_shows', 'anime', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 32 -> 33
class AddAbsoluteNumbering(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding column absolute_number to tv_episodes')
- self.addColumn('tv_episodes', 'absolute_number', 'NUMERIC', '0')
+ self.add_column('tv_episodes', 'absolute_number', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 33 -> 34
class AddSceneAbsoluteNumbering(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding columns absolute_number and scene_absolute_number to scene_numbering')
- self.addColumn('scene_numbering', 'absolute_number', 'NUMERIC', '0')
- self.addColumn('scene_numbering', 'scene_absolute_number', 'NUMERIC', '0')
+ self.add_column('scene_numbering', 'absolute_number', 'NUMERIC', '0')
+ self.add_column('scene_numbering', 'scene_absolute_number', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 34 -> 35
class AddAnimeAllowlistBlocklist(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
cl = [['CREATE TABLE allowlist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)'],
['CREATE TABLE blocklist (show_id INTEGER, range TEXT, keyword TEXT, indexer NUMERIC)']]
self.upgrade_log(u'Creating tables for anime allow and block lists')
self.connection.mass_action(cl)
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 35 -> 36
class AddSceneAbsoluteNumbering2(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding column scene_absolute_number to tv_episodes')
- self.addColumn('tv_episodes', 'scene_absolute_number', 'NUMERIC', '0')
+ self.add_column('tv_episodes', 'scene_absolute_number', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 36 -> 37
class AddXemRefresh(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Creating table xem_refresh')
self.connection.action(
'CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER)')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 37 -> 38
class AddSceneToTvShows(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding column scene to tv_shows')
- self.addColumn('tv_shows', 'scene', 'NUMERIC', '0')
+ self.add_column('tv_shows', 'scene', 'NUMERIC', '0')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 38 -> 39
class AddIndexerMapping(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if self.hasTable('indexer_mapping'):
+ if self.has_table('indexer_mapping'):
self.connection.action('DROP TABLE indexer_mapping')
self.upgrade_log(u'Adding table indexer_mapping')
@@ -1217,44 +1217,44 @@ class AddIndexerMapping(db.SchemaUpgrade):
'CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC,'
' PRIMARY KEY (indexer_id, indexer))')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 39 -> 40
class AddVersionToTvEpisodes(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Adding columns release_group and version to tv_episodes')
- self.addColumn('tv_episodes', 'release_group', 'TEXT', '')
- self.addColumn('tv_episodes', 'version', 'NUMERIC', '-1')
+ self.add_column('tv_episodes', 'release_group', 'TEXT', '')
+ self.add_column('tv_episodes', 'version', 'NUMERIC', '-1')
self.upgrade_log(u'Adding column version to history')
- self.addColumn('history', 'version', 'NUMERIC', '-1')
+ self.add_column('history', 'version', 'NUMERIC', '-1')
- self.incDBVersion()
- return self.checkDBVersion()
+ self.inc_db_version()
+ return self.call_check_db_version()
# 40 -> 10000
class BumpDatabaseVersion(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Bumping database version')
- return self.setDBVersion(10000)
+ return self.set_db_version(10000)
# 41,42 -> 10001
class Migrate41(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Bumping database version')
- return self.setDBVersion(10001)
+ return self.set_db_version(10001)
# 43,44 -> 10001
@@ -1264,25 +1264,25 @@ class Migrate43(db.SchemaUpgrade):
db_backed_up = False
db_chg = None
table = 'tmdb_info'
- if self.hasTable(table):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ if self.has_table(table):
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
db_backed_up = True
self.upgrade_log(u'Dropping redundant table tmdb_info')
# noinspection SqlResolve
self.connection.action('DROP TABLE [%s]' % table)
db_chg = True
- if self.hasColumn('tv_shows', 'tmdb_id'):
+ if self.has_column('tv_shows', 'tmdb_id'):
if not db_backed_up:
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
db_backed_up = True
self.upgrade_log(u'Dropping redundant tmdb_info refs')
- self.dropColumn('tv_shows', 'tmdb_id')
+ self.drop_columns('tv_shows', 'tmdb_id')
db_chg = True
- if not self.hasTable('db_version'):
+ if not self.has_table('db_version'):
if not db_backed_up:
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.connection.action('PRAGMA user_version = 0')
self.connection.action('CREATE TABLE db_version (db_version INTEGER);')
self.connection.action('INSERT INTO db_version (db_version) VALUES (0);')
@@ -1290,124 +1290,124 @@ class Migrate43(db.SchemaUpgrade):
if not db_chg:
self.upgrade_log(u'Bumping database version')
- return self.setDBVersion(10001)
+ return self.set_db_version(10001)
# 4301 -> 10002
class Migrate4301(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Bumping database version')
- return self.setDBVersion(10002)
+ return self.set_db_version(10002)
# 4302,4400 -> 10003
class Migrate4302(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Bumping database version')
- return self.setDBVersion(10003)
+ return self.set_db_version(10003)
# 5816 - 5818 -> 15
class MigrateUpstream(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- self.upgrade_log(u'Migrate SickBeard db v%s into v15' % str(self.checkDBVersion()).replace('58', ''))
+ self.upgrade_log(u'Migrate SickBeard db v%s into v15' % str(self.call_check_db_version()).replace('58', ''))
- return self.setDBVersion(15)
+ return self.set_db_version(15)
# 10000 -> 20000
class SickGearDatabaseVersion(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Bumping database version to new SickGear standards')
- return self.setDBVersion(20000)
+ return self.set_db_version(20000)
# 10001 -> 10000
class RemoveDefaultEpStatusFromTvShows(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Dropping redundant column default_ep_status from tv_shows')
- self.dropColumn('tv_shows', 'default_ep_status')
+ self.drop_columns('tv_shows', 'default_ep_status')
- return self.setDBVersion(10000)
+ return self.set_db_version(10000)
# 10002 -> 10001
class RemoveMinorDBVersion(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Dropping redundant column db_minor_version from db_version')
- self.dropColumn('db_version', 'db_minor_version')
+ self.drop_columns('db_version', 'db_minor_version')
- return self.setDBVersion(10001)
+ return self.set_db_version(10001)
# 10003 -> 10002
class RemoveMetadataSub(db.SchemaUpgrade):
def execute(self):
- if self.hasColumn('tv_shows', 'sub_use_sr_metadata'):
+ if self.has_column('tv_shows', 'sub_use_sr_metadata'):
self.upgrade_log(u'Dropping redundant column metadata sub')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.dropColumn('tv_shows', 'sub_use_sr_metadata')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.drop_columns('tv_shows', 'sub_use_sr_metadata')
- return self.setDBVersion(10002)
+ return self.set_db_version(10002)
# 20000 -> 20001
class DBIncreaseTo20001(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log(u'Bumping database version to force a backup before new database code')
self.connection.action('VACUUM')
self.upgrade_log(u'Performed a vacuum on the database', logger.DEBUG)
- return self.setDBVersion(20001)
+ return self.set_db_version(20001)
# 20001 -> 20002
class AddTvShowOverview(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'overview'):
+ if not self.has_column('tv_shows', 'overview'):
self.upgrade_log(u'Adding column overview to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'overview', 'TEXT', '')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'overview', 'TEXT', '')
- return self.setDBVersion(20002)
+ return self.set_db_version(20002)
# 20002 -> 20003
class AddTvShowTags(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'tag'):
+ if not self.has_column('tv_shows', 'tag'):
self.upgrade_log(u'Adding tag to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'tag', 'TEXT', 'Show List')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'tag', 'TEXT', 'Show List')
- return self.setDBVersion(20003)
+ return self.set_db_version(20003)
# 20003 -> 20004
class ChangeMapIndexer(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if self.hasTable('indexer_mapping'):
+ if self.has_table('indexer_mapping'):
self.connection.action('DROP TABLE indexer_mapping')
self.upgrade_log(u'Changing table indexer_mapping')
@@ -1418,22 +1418,22 @@ class ChangeMapIndexer(db.SchemaUpgrade):
self.connection.action('CREATE INDEX IF NOT EXISTS idx_mapping ON indexer_mapping (indexer_id, indexer)')
- if not self.hasColumn('info', 'last_run_backlog'):
+ if not self.has_column('info', 'last_run_backlog'):
self.upgrade_log('Adding last_run_backlog to info')
- self.addColumn('info', 'last_run_backlog', 'NUMERIC', 1)
+ self.add_column('info', 'last_run_backlog', 'NUMERIC', 1)
self.upgrade_log(u'Moving table scene_exceptions from cache.db to sickbeard.db')
- if self.hasTable('scene_exceptions_refresh'):
+ if self.has_table('scene_exceptions_refresh'):
self.connection.action('DROP TABLE scene_exceptions_refresh')
self.connection.action('CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER)')
- if self.hasTable('scene_exceptions'):
+ if self.has_table('scene_exceptions'):
self.connection.action('DROP TABLE scene_exceptions')
self.connection.action('CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY,'
' indexer_id INTEGER KEY, show_name TEXT, season NUMERIC, custom NUMERIC)')
try:
cachedb = db.DBConnection(filename='cache.db')
- if cachedb.hasTable('scene_exceptions'):
+ if cachedb.has_table('scene_exceptions'):
sql_result = cachedb.action('SELECT * FROM scene_exceptions')
cs = []
for cur_result in sql_result:
@@ -1452,7 +1452,7 @@ class ChangeMapIndexer(db.SchemaUpgrade):
'scene_exceptions', 'scene_exceptions_refresh', 'info', 'indexer_mapping',
'db_version', 'history', 'imdb_info', 'lastUpdate', 'scene_numbering', 'tv_episodes', 'tv_shows',
'xem_refresh'}
- current_tables = set(self.listTables())
+ current_tables = set(self.list_tables())
remove_tables = list(current_tables - keep_tables)
for table in remove_tables:
# noinspection SqlResolve
@@ -1460,34 +1460,34 @@ class ChangeMapIndexer(db.SchemaUpgrade):
self.connection.action('VACUUM')
- return self.setDBVersion(20004)
+ return self.set_db_version(20004)
# 20004 -> 20005
class AddShowNotFoundCounter(db.SchemaUpgrade):
def execute(self):
- if not self.hasTable('tv_shows_not_found'):
+ if not self.has_table('tv_shows_not_found'):
self.upgrade_log(u'Adding table tv_shows_not_found')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.connection.action(
'CREATE TABLE tv_shows_not_found (indexer NUMERIC NOT NULL, indexer_id NUMERIC NOT NULL,'
' fail_count NUMERIC NOT NULL DEFAULT 0, last_check NUMERIC NOT NULL, last_success NUMERIC,'
' PRIMARY KEY (indexer_id, indexer))')
- return self.setDBVersion(20005)
+ return self.set_db_version(20005)
# 20005 -> 20006
class AddFlagTable(db.SchemaUpgrade):
def execute(self):
- if not self.hasTable('flags'):
+ if not self.has_table('flags'):
self.upgrade_log(u'Adding table flags')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.connection.action('CREATE TABLE flags (flag PRIMARY KEY NOT NULL )')
- return self.setDBVersion(20006)
+ return self.set_db_version(20006)
# 20006 -> 20007
@@ -1496,61 +1496,61 @@ class DBIncreaseTo20007(db.SchemaUpgrade):
self.upgrade_log(u'Bumping database version')
- return self.setDBVersion(20007)
+ return self.set_db_version(20007)
# 20007 -> 20008
class AddWebdlTypesTable(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.connection.action('CREATE TABLE webdl_types (dname TEXT NOT NULL , regex TEXT NOT NULL )')
- return self.setDBVersion(20008)
+ return self.set_db_version(20008)
# 20008 -> 20009
class AddWatched(db.SchemaUpgrade):
def execute(self):
# remove old table from version 20007
- if self.hasTable('tv_episodes_watched') and not self.hasColumn('tv_episodes_watched', 'clientep_id'):
+ if self.has_table('tv_episodes_watched') and not self.has_column('tv_episodes_watched', 'clientep_id'):
self.connection.action('DROP TABLE tv_episodes_watched')
self.connection.action('VACUUM')
- if not self.hasTable('tv_episodes_watched'):
+ if not self.has_table('tv_episodes_watched'):
self.upgrade_log(u'Adding table tv_episodes_watched')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.connection.action(
'CREATE TABLE tv_episodes_watched (tvep_id NUMERIC NOT NULL, clientep_id TEXT, label TEXT,'
' played NUMERIC DEFAULT 0 NOT NULL, date_watched NUMERIC NOT NULL, date_added NUMERIC,'
' status NUMERIC, location TEXT, file_size NUMERIC, hide INT default 0 not null)'
)
- return self.setDBVersion(20009)
+ return self.set_db_version(20009)
# 20009 -> 20010
class AddPrune(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'prune'):
+ if not self.has_column('tv_shows', 'prune'):
self.upgrade_log('Adding prune to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'prune', 'INT', 0)
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'prune', 'INT', 0)
- return self.setDBVersion(20010)
+ return self.set_db_version(20010)
# 20010 -> 20011
class AddIndexerToTables(db.SchemaUpgrade):
def execute(self):
sickgear.helpers.upgrade_new_naming()
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
show_ids = {s['prod_id']: s['tv_id'] for s in
self.connection.select('SELECT indexer AS tv_id, indexer_id AS prod_id FROM tv_shows')}
- allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.hasTable('blocklist')]
+ allowtbl, blocktbl = (('allow', 'block'), ('white', 'black'))[not self.connection.has_table('blocklist')]
allowtbl, blocktbl = '%slist' % allowtbl, '%slist' % blocktbl
columns = {allowtbl: 'show_id, range, keyword, indexer',
blocktbl: 'show_id, range, keyword, indexer',
@@ -1560,9 +1560,9 @@ class AddIndexerToTables(db.SchemaUpgrade):
# add missing indexer column
for t in [(allowtbl, 'show_id'), (blocktbl, 'show_id'),
('history', 'showid'), ('scene_exceptions', 'indexer_id')]:
- if not self.hasColumn(t[0], 'indexer'):
+ if not self.has_column(t[0], 'indexer'):
self.upgrade_log(u'Adding TV info support to %s table' % t[0])
- self.addColumn(t[0], 'indexer')
+ self.add_column(t[0], 'indexer')
cl = []
for s_id, i in iteritems(show_ids):
# noinspection SqlResolve
@@ -1578,11 +1578,11 @@ class AddIndexerToTables(db.SchemaUpgrade):
if 0 < self.connection.connection.total_changes:
self.upgrade_log('Removed orphaned data from %s' % t[0])
- if self.connection.hasTable('backup_%s' % t[0]):
+ if self.connection.has_table('backup_%s' % t[0]):
self.upgrade_log('Adding backup data to %s' % t[0])
self.connection.action('REPLACE INTO %s SELECT %s FROM %s' % ('%s (%s)' % (t[0], columns[t[0]]),
columns[t[0]], 'backup_%s' % t[0]))
- self.connection.removeTable('backup_%s' % t[0])
+ self.connection.remove_table('backup_%s' % t[0])
# recreate tables that have wrong primary key = indexer_id without indexer
self.upgrade_log('Adding TV info support to scene_numbering')
@@ -1626,7 +1626,7 @@ class AddIndexerToTables(db.SchemaUpgrade):
self.connection.mass_action(cl)
self.connection.action('CREATE INDEX idx_id_indexer_imdb_info ON imdb_info (indexer,indexer_id)')
- if self.connection.hasTable('backup_imdb_info'):
+ if self.connection.has_table('backup_imdb_info'):
self.upgrade_log('Adding backup data to imdb_info')
# noinspection SqlResolve
self.connection.action('REPLACE INTO imdb_info (indexer, indexer_id, imdb_id, title, year, akas, '
@@ -1634,29 +1634,29 @@ class AddIndexerToTables(db.SchemaUpgrade):
'last_update) SELECT indexer, indexer_id, imdb_id, title, year, akas, runtimes, '
'genres, countries, country_codes, certificates, rating, votes, last_update '
'FROM backup_imdb_info')
- self.connection.removeTable('backup_imdb_info')
+ self.connection.remove_table('backup_imdb_info')
# remove an index of an no longer existing column
self.upgrade_log('Changing/Re-Creating Indexes')
- if self.connection.hasIndex('tv_shows', 'idx_tvdb_id'):
- self.connection.removeIndex('tv_shows', 'idx_tvdb_id')
+ if self.connection.has_index('tv_shows', 'idx_tvdb_id'):
+ self.connection.remove_index('tv_shows', 'idx_tvdb_id')
- if self.connection.hasIndex('tv_shows', 'idx_indexer_id'):
- self.connection.removeIndex('tv_shows', 'idx_indexer_id')
+ if self.connection.has_index('tv_shows', 'idx_indexer_id'):
+ self.connection.remove_index('tv_shows', 'idx_indexer_id')
self.connection.action('CREATE UNIQUE INDEX idx_indexer_id ON tv_shows (indexer,indexer_id)')
- if self.connection.hasIndex('tv_episodes', 'idx_showid'):
- self.connection.removeIndex('tv_episodes', 'idx_showid')
+ if self.connection.has_index('tv_episodes', 'idx_showid'):
+ self.connection.remove_index('tv_episodes', 'idx_showid')
- if self.connection.hasIndex('tv_episodes', 'idx_tv_episodes_showid_airdate'):
- self.connection.removeIndex('tv_episodes', 'idx_tv_episodes_showid_airdate')
+ if self.connection.has_index('tv_episodes', 'idx_tv_episodes_showid_airdate'):
+ self.connection.remove_index('tv_episodes', 'idx_tv_episodes_showid_airdate')
self.connection.action('CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(indexer,showid,airdate)')
- if not self.connection.hasIndex('tv_episodes', 'idx_tv_episodes_unique'):
+ if not self.connection.has_index('tv_episodes', 'idx_tv_episodes_unique'):
self.connection.action('CREATE UNIQUE INDEX idx_tv_episodes_unique ON '
'tv_episodes(indexer,showid,season,episode)')
- if self.connection.hasTable('backup_tv_episodes'):
+ if self.connection.has_table('backup_tv_episodes'):
self.upgrade_log('Adding backup data to tv_episodes')
# noinspection SqlResolve
self.connection.action('REPLACE INTO tv_episodes (episode_id, showid, indexerid, indexer, name, season, '
@@ -1668,9 +1668,9 @@ class AddIndexerToTables(db.SchemaUpgrade):
'file_size, release_name, subtitles, subtitles_searchcount, subtitles_lastsearch, '
'is_proper, scene_season, scene_episode, absolute_number, scene_absolute_number, '
'release_group, version FROM backup_tv_episodes')
- self.connection.removeTable('backup_tv_episodes')
+ self.connection.remove_table('backup_tv_episodes')
- if self.connection.hasTable('backup_tv_shows'):
+ if self.connection.has_table('backup_tv_shows'):
self.upgrade_log('Adding backup data to tv_shows')
# noinspection SqlResolve
self.connection.action('REPLACE INTO tv_shows (show_id, indexer_id, indexer, show_name, location, '
@@ -1684,25 +1684,25 @@ class AddIndexerToTables(db.SchemaUpgrade):
'notify_list, imdb_id, last_update_indexer, dvdorder, archive_firstmatch, '
'rls_require_words, rls_ignore_words, sports, anime, scene, overview, tag, prune '
'FROM backup_tv_shows')
- self.connection.removeTable('backup_tv_shows')
+ self.connection.remove_table('backup_tv_shows')
self.connection.action('VACUUM')
- return self.setDBVersion(20011)
+ return self.set_db_version(20011)
# 20011 -> 20012
class AddShowExludeGlobals(db.SchemaUpgrade):
def execute(self):
- if not self.hasColumn('tv_shows', 'rls_global_exclude_ignore'):
+ if not self.has_column('tv_shows', 'rls_global_exclude_ignore'):
self.upgrade_log('Adding rls_global_exclude_ignore, rls_global_exclude_require to tv_shows')
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
- self.addColumn('tv_shows', 'rls_global_exclude_ignore', data_type='TEXT', default='')
- self.addColumn('tv_shows', 'rls_global_exclude_require', data_type='TEXT', default='')
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ self.add_column('tv_shows', 'rls_global_exclude_ignore', data_type='TEXT', default='')
+ self.add_column('tv_shows', 'rls_global_exclude_require', data_type='TEXT', default='')
- if self.hasTable('tv_shows_exclude_backup'):
+ if self.has_table('tv_shows_exclude_backup'):
self.upgrade_log('Adding rls_global_exclude_ignore, rls_global_exclude_require from backup to tv_shows')
# noinspection SqlResolve
self.connection.mass_action([['UPDATE tv_shows SET rls_global_exclude_ignore = '
@@ -1717,15 +1717,15 @@ class AddShowExludeGlobals(db.SchemaUpgrade):
['DROP TABLE tv_shows_exclude_backup']
])
- return self.setDBVersion(20012)
+ return self.set_db_version(20012)
# 20012 -> 20013
class RenameAllowBlockListTables(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if not self.connection.hasTable('blocklist'):
+ if not self.connection.has_table('blocklist'):
self.upgrade_log('Renaming allow/block list tables')
for old, new in (('black', 'block'), ('white', 'allow')):
@@ -1738,19 +1738,19 @@ class RenameAllowBlockListTables(db.SchemaUpgrade):
['DROP TABLE tmp_%slist' % new]
])
- return self.setDBVersion(20013)
+ return self.set_db_version(20013)
# 20013 -> 20014
class AddHistoryHideColumn(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection , 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if not self.hasColumn('history', 'hide'):
+ if not self.has_column('history', 'hide'):
self.upgrade_log('Adding hide column to history')
- self.addColumn('history', 'hide', default=0, set_default=True)
+ self.add_column('history', 'hide', default=0, set_default=True)
- if self.hasTable('history_hide_backup'):
+ if self.has_table('history_hide_backup'):
self.upgrade_log('Restoring hide status in history from backup')
# noinspection SqlResolve
self.connection.mass_action([
@@ -1765,30 +1765,30 @@ class AddHistoryHideColumn(db.SchemaUpgrade):
['DROP TABLE history_hide_backup']
])
- return self.setDBVersion(20014)
+ return self.set_db_version(20014)
# 20014 -> 20015
class ChangeShowData(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
self.upgrade_log('Adding new data columns to tv_shows')
- self.addColumns('tv_shows', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'),
- ('network_country', 'TEXT', ''), ('network_country_code', 'TEXT', ''),
- ('network_id', 'NUMERIC'), ('network_is_stream', 'INTEGER'),
- ('src_update_timestamp', 'INTEGER')])
+ self.add_columns('tv_shows', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'),
+ ('network_country', 'TEXT', ''), ('network_country_code', 'TEXT', ''),
+ ('network_id', 'NUMERIC'), ('network_is_stream', 'INTEGER'),
+ ('src_update_timestamp', 'INTEGER')])
self.upgrade_log('Adding new data columns to tv_episodes')
- self.addColumns('tv_episodes', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'),
- ('runtime', 'NUMERIC', 0), ('timestamp', 'NUMERIC'),
- ('network', 'TEXT', ''), ('network_country', 'TEXT', ''),
- ('network_country_code', 'TEXT', ''), ('network_id', 'NUMERIC'),
- ('network_is_stream', 'INTEGER')])
+ self.add_columns('tv_episodes', [('timezone', 'TEXT', ''), ('airtime', 'NUMERIC'),
+ ('runtime', 'NUMERIC', 0), ('timestamp', 'NUMERIC'),
+ ('network', 'TEXT', ''), ('network_country', 'TEXT', ''),
+ ('network_country_code', 'TEXT', ''), ('network_id', 'NUMERIC'),
+ ('network_is_stream', 'INTEGER')])
- if not self.hasColumn('imdb_info', 'is_mini_series'):
+ if not self.has_column('imdb_info', 'is_mini_series'):
self.upgrade_log('Adding new data columns to imdb_info')
- self.addColumns('imdb_info', [('is_mini_series', 'INTEGER', 0), ('episode_count', 'NUMERIC')])
+ self.add_columns('imdb_info', [('is_mini_series', 'INTEGER', 0), ('episode_count', 'NUMERIC')])
self.upgrade_log('Adding Character and Persons tables')
@@ -1984,7 +1984,7 @@ class ChangeShowData(db.SchemaUpgrade):
self.connection.mass_action(cl)
self.connection.action('VACUUM')
- return self.setDBVersion(20015)
+ return self.set_db_version(20015)
# 20015 -> 20016
@@ -2014,8 +2014,8 @@ class ChangeTmdbID(db.SchemaUpgrade):
except (BaseException, Exception):
pass
- db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion())
- has_tmdb_backups = all(self.hasTable(_r) for _r in
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
+ has_tmdb_backups = all(self.has_table(_r) for _r in
('backup_tmdb_tv_shows', 'backup_tmdb_tv_episodes', 'backup_tmdb_indexer_mapping'))
if has_tmdb_backups:
self.upgrade_log('Checking for dupe shows in backup tables')
@@ -2091,8 +2091,8 @@ class ChangeTmdbID(db.SchemaUpgrade):
['REPLACE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer, date, status)'
' SELECT indexer_id, indexer, mindexer_id, mindexer, date, status FROM backup_tmdb_indexer_mapping'],
])[has_tmdb_backups])
- [self.connection.removeTable(_t) for _t in ('backup_tmdb_tv_shows', 'backup_tmdb_tv_episodes',
+ [self.connection.remove_table(_t) for _t in ('backup_tmdb_tv_shows', 'backup_tmdb_tv_episodes',
'backup_tmdb_indexer_mapping')]
- return self.setDBVersion(20016)
+ return self.set_db_version(20016)
diff --git a/sickgear/db.py b/sickgear/db.py
index bce8ed81..2e70ba16 100644
--- a/sickgear/db.py
+++ b/sickgear/db.py
@@ -37,6 +37,7 @@ from six import iterkeys, iteritems, itervalues
# noinspection PyUnreachableCode
if False:
+ # noinspection PyUnresolvedReferences
from typing import Any, AnyStr, Dict, List, Optional, Tuple, Union
@@ -47,7 +48,7 @@ db_support_upsert = (3, 25, 0) <= sqlite3.sqlite_version_info # type: bool
db_supports_backup = hasattr(sqlite3.Connection, 'backup') and (3, 6, 11) <= sqlite3.sqlite_version_info # type: bool
-def dbFilename(filename='sickbeard.db', suffix=None):
+def db_filename(filename='sickbeard.db', suffix=None):
# type: (AnyStr, Optional[AnyStr]) -> AnyStr
"""
@param filename: The sqlite database filename to use. If not specified,
@@ -70,7 +71,7 @@ def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True):
:param value_dict: dict of values to be set {'table_fieldname': value}
:param key_dict: dict of restrains for update {'table_fieldname': value}
:param sanitise: True to remove k, v pairs in keyDict from valueDict as they must not exist in both.
- This option has a performance hit so it's best to remove key_dict keys from value_dict and set this False instead.
+ This option has a performance hit, so it's best to remove key_dict keys from value_dict and set this False instead.
:type sanitise: Boolean
:return: list of 2 sql command
"""
@@ -104,9 +105,9 @@ class DBConnection(object):
from . import helpers
self.new_db = False
- db_src = dbFilename(filename)
+ db_src = db_filename(filename)
if not os.path.isfile(db_src):
- db_alt = dbFilename('sickrage.db')
+ db_alt = db_filename('sickrage.db')
if os.path.isfile(db_alt):
helpers.copy_file(db_alt, db_src)
@@ -143,6 +144,7 @@ class DBConnection(object):
logger.log('Backup target file already exists', logger.ERROR)
return False, 'Backup target file already exists'
+ # noinspection PyUnusedLocal
def progress(status, remaining, total):
logger.log('Copied %s of %s pages...' % (total - remaining, total), logger.DEBUG)
@@ -167,11 +169,11 @@ class DBConnection(object):
return True, 'Backup successful'
- def checkDBVersion(self):
+ def check_db_version(self):
# type: (...) -> int
try:
- if self.hasTable('db_version'):
+ if self.has_table('db_version'):
result = self.select('SELECT db_version FROM db_version')
else:
version = self.select('PRAGMA user_version')[0]['user_version']
@@ -185,7 +187,7 @@ class DBConnection(object):
if result:
version = int(result[0]['db_version'])
- if 10000 > version and self.hasColumn('db_version', 'db_minor_version'):
+ if 10000 > version and self.has_column('db_version', 'db_minor_version'):
# noinspection SqlResolve
minor = self.select('SELECT db_minor_version FROM db_version')
return version * 100 + int(minor[0]['db_minor_version'])
@@ -313,7 +315,7 @@ class DBConnection(object):
+ ' VALUES (%s)' % ', '.join(['?'] * (len(value_dict) + len(key_dict)))
self.action(query, list(value_dict.values()) + list(key_dict.values()))
- def tableInfo(self, table_name):
+ def table_info(self, table_name):
# type: (AnyStr) -> Dict[AnyStr, Dict[AnyStr, AnyStr]]
# FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
@@ -331,38 +333,32 @@ class DBConnection(object):
d[col[0]] = row[idx]
return d
- def hasTable(self, table_name):
+ def has_table(self, table_name):
# type: (AnyStr) -> bool
return 0 < len(self.select('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,)))
- def hasColumn(self, table_name, column):
+ def has_column(self, table_name, column):
# type: (AnyStr, AnyStr) -> bool
- return column in self.tableInfo(table_name)
+ return column in self.table_info(table_name)
- def hasIndex(self, table_name, index):
+ def has_index(self, table_name, index):
# type: (AnyStr, AnyStr) -> bool
- sqlResults = self.select('PRAGMA index_list([%s])' % table_name)
- for result in sqlResults:
+ sql_results = self.select('PRAGMA index_list([%s])' % table_name)
+ for result in sql_results:
if result['name'] == index:
return True
return False
- def removeIndex(self, table, name):
+ def remove_index(self, table, name):
# type: (AnyStr, AnyStr) -> None
- if self.hasIndex(table, name):
+ if self.has_index(table, name):
self.action('DROP INDEX' + ' [%s]' % name)
- def removeTable(self, name):
+ def remove_table(self, name):
# type: (AnyStr) -> None
- if self.hasTable(name):
+ if self.has_table(name):
self.action('DROP TABLE' + ' [%s]' % name)
- # noinspection SqlResolve
- def addColumn(self, table, column, data_type='NUMERIC', default=0):
- # type: (AnyStr, AnyStr, AnyStr, Any) -> None
- self.action('ALTER TABLE [%s] ADD %s %s' % (table, column, data_type))
- self.action('UPDATE [%s] SET %s = ?' % (table, column), (default,))
-
def has_flag(self, flag_name):
# type: (AnyStr) -> bool
sql_result = self.select('SELECT flag FROM flags WHERE flag = ?', [flag_name])
@@ -415,7 +411,7 @@ class DBConnection(object):
logger.load_log('Upgrading %s' % self.filename, to_log, log_level)
-def sanityCheckDatabase(connection, sanity_check):
+def sanity_check_db(connection, sanity_check):
sanity_check(connection).check()
@@ -427,36 +423,36 @@ class DBSanityCheck(object):
pass
-def upgradeDatabase(connection, schema):
+def upgrade_database(connection, schema):
logger.log(u'Checking database structure...', logger.MESSAGE)
connection.is_upgrading = False
- connection.new_db = 0 == connection.checkDBVersion()
- _processUpgrade(connection, schema)
+ connection.new_db = 0 == connection.check_db_version()
+ _process_upgrade(connection, schema)
if connection.is_upgrading:
connection.upgrade_log('Finished')
-def prettyName(class_name):
+def _pretty_name(class_name):
# type: (AnyStr) -> AnyStr
return ' '.join([x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)])
-def restoreDatabase(filename, version):
+def _restore_database(filename, version):
logger.log(u'Restoring database before trying upgrade again')
- if not sickgear.helpers.restore_versioned_file(dbFilename(filename=filename, suffix='v%s' % version), version):
+ if not sickgear.helpers.restore_versioned_file(db_filename(filename=filename, suffix='v%s' % version), version):
logger.log_error_and_exit(u'Database restore failed, abort upgrading database')
return False
return True
-def _processUpgrade(connection, upgrade_class):
+def _process_upgrade(connection, upgrade_class):
instance = upgrade_class(connection)
- logger.log('Checking %s database upgrade' % prettyName(upgrade_class.__name__), logger.DEBUG)
+ logger.log('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__), logger.DEBUG)
if not instance.test():
connection.is_upgrading = True
- connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or prettyName(upgrade_class.__name__))
- logger.log('Database upgrade required: %s' % prettyName(upgrade_class.__name__), logger.MESSAGE)
- db_version = connection.checkDBVersion()
+ connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or _pretty_name(upgrade_class.__name__))
+ logger.log('Database upgrade required: %s' % _pretty_name(upgrade_class.__name__), logger.MESSAGE)
+ db_version = connection.check_db_version()
try:
# only do backup if it's not a new db
0 < db_version and backup_database(connection, connection.filename, db_version)
@@ -468,7 +464,7 @@ def _processUpgrade(connection, upgrade_class):
# close db before attempting restore
connection.close()
- if restoreDatabase(connection.filename, db_version):
+ if _restore_database(connection.filename, db_version):
logger.log_error_and_exit('Successfully restored database version: %s' % db_version)
else:
logger.log_error_and_exit('Failed to restore database version: %s' % db_version)
@@ -480,7 +476,7 @@ def _processUpgrade(connection, upgrade_class):
logger.log('%s upgrade not required' % upgrade_class.__name__, logger.DEBUG)
for upgradeSubClass in upgrade_class.__subclasses__():
- _processUpgrade(connection, upgradeSubClass)
+ _process_upgrade(connection, upgradeSubClass)
# Base migration class. All future DB changes should be subclassed from this class
@@ -488,11 +484,11 @@ class SchemaUpgrade(object):
def __init__(self, connection, **kwargs):
self.connection = connection
- def hasTable(self, table_name):
+ def has_table(self, table_name):
return 0 < len(self.connection.select('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,)))
- def hasColumn(self, table_name, column):
- return column in self.connection.tableInfo(table_name)
+ def has_column(self, table_name, column):
+ return column in self.connection.table_info(table_name)
def list_tables(self):
# type: (...) -> List[AnyStr]
@@ -511,13 +507,13 @@ class SchemaUpgrade(object):
['index'])]
# noinspection SqlResolve
- def addColumn(self, table, column, data_type='NUMERIC', default=0, set_default=False):
+ def add_column(self, table, column, data_type='NUMERIC', default=0, set_default=False):
self.connection.action('ALTER TABLE [%s] ADD %s %s%s' %
(table, column, data_type, ('', ' DEFAULT "%s"' % default)[set_default]))
self.connection.action('UPDATE [%s] SET %s = ?' % (table, column), (default,))
# noinspection SqlResolve
- def addColumns(self, table, column_list=None):
+ def add_columns(self, table, column_list=None):
# type: (AnyStr, List) -> None
if isinstance(column_list, list):
sql = []
@@ -535,25 +531,21 @@ class SchemaUpgrade(object):
if sql:
self.connection.mass_action(sql)
- def dropColumn(self, table, columns):
- # type: (AnyStr, AnyStr) -> None
- self.drop_columns(table, columns)
-
def drop_columns(self, table, column):
# type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> None
# get old table columns and store the ones we want to keep
result = self.connection.select('pragma table_info([%s])' % table)
columns_list = ([column], column)[isinstance(column, list)]
- keptColumns = list(filter(lambda col: col['name'] not in columns_list, result))
+ kept_columns = list(filter(lambda col: col['name'] not in columns_list, result))
- keptColumnsNames = []
+ kept_columns_names = []
final = []
pk = []
# copy the old table schema, column by column
- for column in keptColumns:
+ for column in kept_columns:
- keptColumnsNames.append(column['name'])
+ kept_columns_names.append(column['name'])
cl = [column['name'], column['type']]
@@ -574,7 +566,7 @@ class SchemaUpgrade(object):
# join all the table column creation fields
final = ', '.join(final)
- keptColumnsNames = ', '.join(keptColumnsNames)
+ kept_columns_names = ', '.join(kept_columns_names)
# generate sql for the new table creation
if 0 == len(pk):
@@ -586,12 +578,12 @@ class SchemaUpgrade(object):
# create new temporary table and copy the old table data across, barring the removed column
self.connection.action(sql)
# noinspection SqlResolve
- self.connection.action('INSERT INTO [%s_new] SELECT %s FROM [%s]' % (table, keptColumnsNames, table))
+ self.connection.action('INSERT INTO [%s_new] SELECT %s FROM [%s]' % (table, kept_columns_names, table))
# copy the old indexes from the old table
result = self.connection.select("SELECT sql FROM sqlite_master WHERE tbl_name=? AND type='index'", [table])
- # remove the old table and rename the new table to take it's place
+ # remove the old table and rename the new table to take its place
# noinspection SqlResolve
self.connection.action('DROP TABLE [%s]' % table)
# noinspection SqlResolve
@@ -605,22 +597,19 @@ class SchemaUpgrade(object):
# vacuum the db as we will have a lot of space to reclaim after dropping tables
self.connection.action('VACUUM')
- def checkDBVersion(self):
- return self.connection.checkDBVersion()
+ def call_check_db_version(self):
+ return self.connection.check_db_version()
- def incDBVersion(self):
- new_version = self.checkDBVersion() + 1
+ def inc_db_version(self):
+ new_version = self.call_check_db_version() + 1
# noinspection SqlConstantCondition
self.connection.action('UPDATE db_version SET db_version = ? WHERE 1=1', [new_version])
return new_version
- def setDBVersion(self, new_version, check_db_version=True):
+ def set_db_version(self, new_version, check_db_version=True):
# noinspection SqlConstantCondition
self.connection.action('UPDATE db_version SET db_version = ? WHERE 1=1', [new_version])
- return check_db_version and self.checkDBVersion()
-
- def listTables(self):
- return self.list_tables()
+ return check_db_version and self.call_check_db_version()
def do_query(self, queries):
if not isinstance(queries, list):
@@ -630,23 +619,23 @@ class SchemaUpgrade(object):
for query in queries:
tbl_name = re.findall(r'(?i)DROP.*?TABLE.*?\[?([^\s\]]+)', query)
- if tbl_name and not self.hasTable(tbl_name[0]):
+ if tbl_name and not self.has_table(tbl_name[0]):
continue
tbl_name = re.findall(r'(?i)CREATE.*?TABLE.*?\s([^\s(]+)\s*\(', query)
- if tbl_name and self.hasTable(tbl_name[0]):
+ if tbl_name and self.has_table(tbl_name[0]):
continue
self.connection.action(query)
def finish(self, tbl_dropped=False):
if tbl_dropped:
self.connection.action('VACUUM')
- self.incDBVersion()
+ self.inc_db_version()
def upgrade_log(self, *args, **kwargs):
self.connection.upgrade_log(*args, **kwargs)
-def MigrationCode(my_db):
+def migration_code(my_db):
schema = {
0: sickgear.mainDB.InitialSchema,
9: sickgear.mainDB.AddSizeAndSceneNameFields,
@@ -719,7 +708,7 @@ def MigrationCode(my_db):
# 20002: sickgear.mainDB.AddCoolSickGearFeature3,
}
- db_version = my_db.checkDBVersion()
+ db_version = my_db.check_db_version()
my_db.new_db = 0 == db_version
logger.log(u'Detected database version: v%s' % db_version, logger.DEBUG)
@@ -746,7 +735,7 @@ def MigrationCode(my_db):
my_db.close()
logger.log(u'Failed to update database with error: %s attempting recovery...' % ex(e), logger.ERROR)
- if restoreDatabase(my_db.filename, db_version):
+ if _restore_database(my_db.filename, db_version):
# initialize the main SB database
logger.log_error_and_exit(u'Successfully restored database version: %s' % db_version)
else:
@@ -777,7 +766,7 @@ def backup_database(db_connection, filename, version):
return
logger.log(u'Backing up database before upgrade')
- if not sickgear.helpers.backup_versioned_file(dbFilename(filename), version):
+ if not sickgear.helpers.backup_versioned_file(db_filename(filename), version):
logger.log_error_and_exit(u'Database backup failed, abort upgrading database')
else:
logger.log(u'Proceeding with upgrade')
@@ -841,7 +830,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True):
optional compress with zip or 7z (python 3 only, external lib py7zr required)
7z falls back to zip if py7zr is not available
- :param target: target folder to backup to
+    :param target: target folder for the db backup
:param compress: compress db backups
:param prefer_7z: prefer 7z compression if available
:return: success, message
diff --git a/sickgear/providers/generic.py b/sickgear/providers/generic.py
index a75600be..126cc747 100644
--- a/sickgear/providers/generic.py
+++ b/sickgear/providers/generic.py
@@ -193,7 +193,7 @@ class ProviderFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('provider_fails'):
+ if my_db.has_table('provider_fails'):
results = my_db.select('SELECT * FROM provider_fails WHERE prov_name = ?', [self.provider_name()])
self._fails = []
for r in results:
@@ -210,7 +210,7 @@ class ProviderFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('provider_fails'):
+ if my_db.has_table('provider_fails'):
# noinspection PyCallByClass,PyTypeChecker
time_limit = int(timestamp_near(datetime.datetime.now() - datetime.timedelta(days=28)))
my_db.action('DELETE FROM provider_fails WHERE fail_time < ?', [time_limit])
@@ -281,7 +281,7 @@ class GenericProvider(object):
def _load_fail_values(self):
if hasattr(sickgear, 'DATA_DIR'):
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('provider_fails_count'):
+ if my_db.has_table('provider_fails_count'):
r = my_db.select('SELECT * FROM provider_fails_count WHERE prov_name = ?', [self.get_id()])
if r:
self._failure_count = helpers.try_int(r[0]['failure_count'], 0)
@@ -302,7 +302,7 @@ class GenericProvider(object):
def _save_fail_value(self, field, value):
my_db = db.DBConnection('cache.db')
- if my_db.hasTable('provider_fails_count'):
+ if my_db.has_table('provider_fails_count'):
r = my_db.action('UPDATE provider_fails_count SET %s = ? WHERE prov_name = ?' % field,
[value, self.get_id()])
if 0 == r.rowcount:
diff --git a/sickgear/tvcache.py b/sickgear/tvcache.py
index 0cb50660..cdcb4b8a 100644
--- a/sickgear/tvcache.py
+++ b/sickgear/tvcache.py
@@ -42,7 +42,7 @@ class CacheDBConnection(db.DBConnection):
# Create the table if it's not already there
try:
- if not self.hasTable('lastUpdate'):
+ if not self.has_table('lastUpdate'):
self.action('CREATE TABLE lastUpdate (provider TEXT, time NUMERIC)')
except (BaseException, Exception) as e:
if ex(e) != 'table lastUpdate already exists':
diff --git a/tests/migration_tests.py b/tests/migration_tests.py
index 1682afdb..2a2d3a6c 100644
--- a/tests/migration_tests.py
+++ b/tests/migration_tests.py
@@ -48,7 +48,7 @@ class MigrationBasicTests(test.SickbeardTestDBCase):
update.execute()
sleep(0.1)
- db.MigrationCode(my_db)
+ db.migration_code(my_db)
my_db.close()
# force python to garbage collect all db connections, so that the file can be deleted
@@ -67,9 +67,9 @@ class MigrationBasicTests(test.SickbeardTestDBCase):
# 0 -> 31
class OldInitialSchema(db.SchemaUpgrade):
def execute(self):
- db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion())
+ db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
- if not self.hasTable('tv_shows') and not self.hasTable('db_version'):
+ if not self.has_table('tv_shows') and not self.has_table('db_version'):
queries = [
'CREATE TABLE db_version (db_version INTEGER);',
'CREATE TABLE history ('
@@ -105,7 +105,7 @@ class OldInitialSchema(db.SchemaUpgrade):
self.connection.action(query)
else:
- cur_db_version = self.checkDBVersion()
+ cur_db_version = self.call_check_db_version()
if cur_db_version < MIN_DB_VERSION:
logger.log_error_and_exit(
@@ -127,13 +127,13 @@ class OldInitialSchema(db.SchemaUpgrade):
' your database may be unusable due to their modifications.'
)
- return self.checkDBVersion()
+ return self.call_check_db_version()
class AddDefaultEpStatusToTvShows(db.SchemaUpgrade):
def execute(self):
- self.addColumn('tv_shows', 'default_ep_status', 'TEXT', '')
- self.setDBVersion(41, check_db_version=False)
+ self.add_column('tv_shows', 'default_ep_status', 'TEXT', '')
+ self.set_db_version(41, check_db_version=False)
if '__main__' == __name__:
diff --git a/tests/test_lib.py b/tests/test_lib.py
index d8be545b..e3fe8be3 100644
--- a/tests/test_lib.py
+++ b/tests/test_lib.py
@@ -195,16 +195,16 @@ def setup_test_db():
"""upgrades the db to the latest version
"""
# upgrading the db
- db.MigrationCode(db.DBConnection())
+ db.migration_code(db.DBConnection())
# fix up any db problems
- db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck)
+ db.sanity_check_db(db.DBConnection(), mainDB.MainSanityCheck)
# and for cachedb too
- db.upgradeDatabase(db.DBConnection('cache.db'), cache_db.InitialSchema)
+ db.upgrade_database(db.DBConnection('cache.db'), cache_db.InitialSchema)
# and for faileddb too
- db.upgradeDatabase(db.DBConnection('failed.db'), failed_db.InitialSchema)
+ db.upgrade_database(db.DBConnection('failed.db'), failed_db.InitialSchema)
def teardown_test_db():