Change: rename db variables/functions.

This commit is contained in:
JackDandy 2023-02-24 11:46:07 +00:00
parent 32987134ba
commit ec874504de
12 changed files with 412 additions and 423 deletions

View file

@ -29,7 +29,7 @@
</td>
</tr>
<tr><td class="infoTableHeader">Config file:</td><td class="infoTableCell">$sg_str('CONFIG_FILE')</td></tr>
<tr><td class="infoTableHeader">Database file:</td><td class="infoTableCell">$db.dbFilename()</td></tr>
<tr><td class="infoTableHeader">Database file:</td><td class="infoTableCell">$db.db_filename()</td></tr>
#if $db.db_supports_backup
<tr><td class="infoTableHeader">Database backups:</td><td class="infoTableCell">$backup_db_path</td></tr>
#end if

View file

@ -159,7 +159,7 @@ class ConnectionFailDict(object):
if None is not db:
with self.lock:
my_db = db.DBConnection('cache.db')
if my_db.hasTable('connection_fails'):
if my_db.has_table('connection_fails'):
domains = my_db.select('SELECT DISTINCT domain_url from connection_fails')
for domain in domains:
self.domain_list[domain['domain_url']] = ConnectionFailList(domain['domain_url'])
@ -515,7 +515,7 @@ class ConnectionFailList(object):
def _load_fail_values(self):
if None is not DATA_DIR:
my_db = db.DBConnection('cache.db')
if my_db.hasTable('connection_fails_count'):
if my_db.has_table('connection_fails_count'):
r = my_db.select('SELECT * FROM connection_fails_count WHERE domain_url = ?', [self.url])
if r:
self._failure_count = try_int(r[0]['failure_count'], 0)
@ -536,7 +536,7 @@ class ConnectionFailList(object):
def _save_fail_value(self, field, value):
my_db = db.DBConnection('cache.db')
if my_db.hasTable('connection_fails_count'):
if my_db.has_table('connection_fails_count'):
r = my_db.action('UPDATE connection_fails_count SET %s = ? WHERE domain_url = ?' % field,
[value, self.url])
if 0 == r.rowcount:
@ -568,7 +568,7 @@ class ConnectionFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
if my_db.hasTable('connection_fails'):
if my_db.has_table('connection_fails'):
results = my_db.select('SELECT * FROM connection_fails WHERE domain_url = ?', [self.url])
self._fails = []
for r in results:
@ -586,7 +586,7 @@ class ConnectionFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
if my_db.hasTable('connection_fails'):
if my_db.has_table('connection_fails'):
# noinspection PyCallByClass,PyTypeChecker
time_limit = _totimestamp(datetime.datetime.now() - datetime.timedelta(days=28))
my_db.action('DELETE FROM connection_fails WHERE fail_time < ?', [time_limit])

View file

@ -428,7 +428,7 @@ class SickGear(object):
('sickbeard.db', sickgear.mainDB.MIN_DB_VERSION, sickgear.mainDB.MAX_DB_VERSION,
sickgear.mainDB.TEST_BASE_VERSION, 'MainDb')
]:
cur_db_version = db.DBConnection(d).checkDBVersion()
cur_db_version = db.DBConnection(d).check_db_version()
# handling of standalone TEST db versions
load_msg = 'Downgrading %s to production version' % d
@ -437,7 +437,7 @@ class SickGear(object):
print('Your [%s] database version (%s) is a test db version and doesn\'t match SickGear required '
'version (%s), downgrading to production db' % (d, cur_db_version, max_v))
self.execute_rollback(mo, max_v, load_msg)
cur_db_version = db.DBConnection(d).checkDBVersion()
cur_db_version = db.DBConnection(d).check_db_version()
if 100000 <= cur_db_version:
print(u'Rollback to production failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
@ -452,7 +452,7 @@ class SickGear(object):
print('Your [%s] database version (%s) is a db version and doesn\'t match SickGear required '
'version (%s), downgrading to production base db' % (d, cur_db_version, max_v))
self.execute_rollback(mo, base_v, load_msg)
cur_db_version = db.DBConnection(d).checkDBVersion()
cur_db_version = db.DBConnection(d).check_db_version()
if 100000 <= cur_db_version:
print(u'Rollback to production base failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
@ -474,7 +474,7 @@ class SickGear(object):
u' what this version of SickGear supports. Trying to rollback now. Please wait...' %
(d, cur_db_version))
self.execute_rollback(mo, max_v, load_msg)
if db.DBConnection(d).checkDBVersion() > max_v:
if db.DBConnection(d).check_db_version() > max_v:
print(u'Rollback failed.')
sys.exit(u'If you have used other forks, your database may be unusable due to their changes')
print(u'Rollback of [%s] successful.' % d)

View file

@ -1539,19 +1539,19 @@ def init_stage_2():
# initialize main database
my_db = db.DBConnection()
db.MigrationCode(my_db)
db.migration_code(my_db)
# initialize the cache database
my_db = db.DBConnection('cache.db')
db.upgradeDatabase(my_db, cache_db.InitialSchema)
db.upgrade_database(my_db, cache_db.InitialSchema)
# initialize the failed downloads database
my_db = db.DBConnection('failed.db')
db.upgradeDatabase(my_db, failed_db.InitialSchema)
db.upgrade_database(my_db, failed_db.InitialSchema)
# fix up any db problems
my_db = db.DBConnection()
db.sanityCheckDatabase(my_db, mainDB.MainSanityCheck)
db.sanity_check_db(my_db, mainDB.MainSanityCheck)
# initialize metadata_providers
metadata_provider_dict = metadata.get_metadata_generator_dict()

View file

@ -96,16 +96,16 @@ class InitialSchema(db.SchemaUpgrade):
])
def test(self):
return self.hasTable('lastUpdate')
return self.has_table('lastUpdate')
def execute(self):
self.do_query(self.queries[next(iter(self.queries))])
self.setDBVersion(MIN_DB_VERSION, check_db_version=False)
self.set_db_version(MIN_DB_VERSION, check_db_version=False)
class ConsolidateProviders(InitialSchema):
def test(self):
return 1 < self.checkDBVersion()
return 1 < self.call_check_db_version()
def execute(self):
keep_tables = {'lastUpdate', 'lastSearch', 'db_version',
@ -113,13 +113,13 @@ class ConsolidateProviders(InitialSchema):
# old provider_cache is dropped before re-creation
# noinspection SqlResolve
self.do_query(['DROP TABLE [provider_cache]'] + self.queries['consolidate_providers'] +
['DROP TABLE [%s]' % t for t in (set(self.listTables()) - keep_tables)])
['DROP TABLE [%s]' % t for t in (set(self.list_tables()) - keep_tables)])
self.finish(True)
class AddBacklogParts(ConsolidateProviders):
def test(self):
return 2 < self.checkDBVersion()
return 2 < self.call_check_db_version()
def execute(self):
# noinspection SqlResolve
@ -130,7 +130,7 @@ class AddBacklogParts(ConsolidateProviders):
class AddProviderFailureHandling(AddBacklogParts):
def test(self):
return 3 < self.checkDBVersion()
return 3 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['add_provider_fails'])
@ -139,17 +139,17 @@ class AddProviderFailureHandling(AddBacklogParts):
class AddIndexerToTables(AddProviderFailureHandling):
def test(self):
return 4 < self.checkDBVersion()
return 4 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['add_indexer_to_tables'])
self.addColumn('provider_cache', 'indexer', 'NUMERIC')
self.add_column('provider_cache', 'indexer', 'NUMERIC')
self.finish()
class AddGenericFailureHandling(AddBacklogParts):
def test(self):
return 5 < self.checkDBVersion()
return 5 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['connection_fails'])
@ -158,7 +158,7 @@ class AddGenericFailureHandling(AddBacklogParts):
class AddSaveQueues(AddGenericFailureHandling):
def test(self):
return 6 < self.checkDBVersion()
return 6 < self.call_check_db_version()
def execute(self):
self.do_query(self.queries['save_queues'])

View file

@ -28,7 +28,7 @@ TEST_BASE_VERSION = None # the base production db version, only needed for TEST
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
def test(self):
return self.hasTable('failed')
return self.has_table('failed')
def execute(self):
queries = [
@ -45,18 +45,18 @@ class InitialSchema(db.SchemaUpgrade):
class SizeAndProvider(InitialSchema):
def test(self):
return self.hasColumn('failed', 'size') and self.hasColumn('failed', 'provider')
return self.has_column('failed', 'size') and self.has_column('failed', 'provider')
def execute(self):
self.addColumn('failed', 'size')
self.addColumn('failed', 'provider', 'TEXT', '')
self.add_column('failed', 'size')
self.add_column('failed', 'provider', 'TEXT', '')
class History(SizeAndProvider):
"""Snatch history that can't be modified by the user"""
def test(self):
return self.hasTable('history')
return self.has_table('history')
def execute(self):
self.connection.action('CREATE TABLE history (date NUMERIC, ' +
@ -67,21 +67,21 @@ class HistoryStatus(History):
"""Store episode status before snatch to revert to if necessary"""
def test(self):
return self.hasColumn('history', 'old_status')
return self.has_column('history', 'old_status')
def execute(self):
self.addColumn('history', 'old_status', 'NUMERIC', Quality.NONE)
self.addColumn('history', 'showid', 'NUMERIC', '-1')
self.addColumn('history', 'season', 'NUMERIC', '-1')
self.addColumn('history', 'episode', 'NUMERIC', '-1')
self.add_column('history', 'old_status', 'NUMERIC', Quality.NONE)
self.add_column('history', 'showid', 'NUMERIC', '-1')
self.add_column('history', 'season', 'NUMERIC', '-1')
self.add_column('history', 'episode', 'NUMERIC', '-1')
class AddIndexerToTables(HistoryStatus):
def test(self):
return self.hasColumn('history', 'indexer')
return self.has_column('history', 'indexer')
def execute(self):
self.addColumn('history', 'indexer', 'NUMERIC')
self.add_column('history', 'indexer', 'NUMERIC')
main_db = db.DBConnection('sickbeard.db')
show_ids = {s['prod_id']: s['tv_id'] for s in
@ -91,15 +91,15 @@ class AddIndexerToTables(HistoryStatus):
cl.append(['UPDATE history SET indexer = ? WHERE showid = ?', [i, s_id]])
self.connection.mass_action(cl)
if self.connection.hasTable('backup_history'):
if self.connection.has_table('backup_history'):
self.connection.action(
'REPLACE INTO history '
'(date, size, `release`, provider, old_status, showid, season, episode, indexer)'
' SELECT'
' date, size, `release`, provider, old_status, showid, season, episode, indexer'
' FROM backup_history')
self.connection.removeTable('backup_history')
self.connection.remove_table('backup_history')
self.connection.action('VACUUM')
self.setDBVersion(2, check_db_version=False)
self.set_db_version(2, check_db_version=False)

File diff suppressed because it is too large Load diff

View file

@ -37,6 +37,7 @@ from six import iterkeys, iteritems, itervalues
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from typing import Any, AnyStr, Dict, List, Optional, Tuple, Union
@ -47,7 +48,7 @@ db_support_upsert = (3, 25, 0) <= sqlite3.sqlite_version_info # type: bool
db_supports_backup = hasattr(sqlite3.Connection, 'backup') and (3, 6, 11) <= sqlite3.sqlite_version_info # type: bool
def dbFilename(filename='sickbeard.db', suffix=None):
def db_filename(filename='sickbeard.db', suffix=None):
# type: (AnyStr, Optional[AnyStr]) -> AnyStr
"""
@param filename: The sqlite database filename to use. If not specified,
@ -70,7 +71,7 @@ def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True):
:param value_dict: dict of values to be set {'table_fieldname': value}
:param key_dict: dict of restrains for update {'table_fieldname': value}
:param sanitise: True to remove k, v pairs in keyDict from valueDict as they must not exist in both.
This option has a performance hit so it's best to remove key_dict keys from value_dict and set this False instead.
This option has a performance hit, so it's best to remove key_dict keys from value_dict and set this False instead.
:type sanitise: Boolean
:return: list of 2 sql command
"""
@ -104,9 +105,9 @@ class DBConnection(object):
from . import helpers
self.new_db = False
db_src = dbFilename(filename)
db_src = db_filename(filename)
if not os.path.isfile(db_src):
db_alt = dbFilename('sickrage.db')
db_alt = db_filename('sickrage.db')
if os.path.isfile(db_alt):
helpers.copy_file(db_alt, db_src)
@ -143,6 +144,7 @@ class DBConnection(object):
logger.log('Backup target file already exists', logger.ERROR)
return False, 'Backup target file already exists'
# noinspection PyUnusedLocal
def progress(status, remaining, total):
logger.log('Copied %s of %s pages...' % (total - remaining, total), logger.DEBUG)
@ -167,11 +169,11 @@ class DBConnection(object):
return True, 'Backup successful'
def checkDBVersion(self):
def check_db_version(self):
# type: (...) -> int
try:
if self.hasTable('db_version'):
if self.has_table('db_version'):
result = self.select('SELECT db_version FROM db_version')
else:
version = self.select('PRAGMA user_version')[0]['user_version']
@ -185,7 +187,7 @@ class DBConnection(object):
if result:
version = int(result[0]['db_version'])
if 10000 > version and self.hasColumn('db_version', 'db_minor_version'):
if 10000 > version and self.has_column('db_version', 'db_minor_version'):
# noinspection SqlResolve
minor = self.select('SELECT db_minor_version FROM db_version')
return version * 100 + int(minor[0]['db_minor_version'])
@ -313,7 +315,7 @@ class DBConnection(object):
+ ' VALUES (%s)' % ', '.join(['?'] * (len(value_dict) + len(key_dict)))
self.action(query, list(value_dict.values()) + list(key_dict.values()))
def tableInfo(self, table_name):
def table_info(self, table_name):
# type: (AnyStr) -> Dict[AnyStr, Dict[AnyStr, AnyStr]]
# FIXME ? binding is not supported here, but I cannot find a way to escape a string manually
@ -331,38 +333,32 @@ class DBConnection(object):
d[col[0]] = row[idx]
return d
def hasTable(self, table_name):
def has_table(self, table_name):
# type: (AnyStr) -> bool
return 0 < len(self.select('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,)))
def hasColumn(self, table_name, column):
def has_column(self, table_name, column):
# type: (AnyStr, AnyStr) -> bool
return column in self.tableInfo(table_name)
return column in self.table_info(table_name)
def hasIndex(self, table_name, index):
def has_index(self, table_name, index):
# type: (AnyStr, AnyStr) -> bool
sqlResults = self.select('PRAGMA index_list([%s])' % table_name)
for result in sqlResults:
sql_results = self.select('PRAGMA index_list([%s])' % table_name)
for result in sql_results:
if result['name'] == index:
return True
return False
def removeIndex(self, table, name):
def remove_index(self, table, name):
# type: (AnyStr, AnyStr) -> None
if self.hasIndex(table, name):
if self.has_index(table, name):
self.action('DROP INDEX' + ' [%s]' % name)
def removeTable(self, name):
def remove_table(self, name):
# type: (AnyStr) -> None
if self.hasTable(name):
if self.has_table(name):
self.action('DROP TABLE' + ' [%s]' % name)
# noinspection SqlResolve
def addColumn(self, table, column, data_type='NUMERIC', default=0):
# type: (AnyStr, AnyStr, AnyStr, Any) -> None
self.action('ALTER TABLE [%s] ADD %s %s' % (table, column, data_type))
self.action('UPDATE [%s] SET %s = ?' % (table, column), (default,))
def has_flag(self, flag_name):
# type: (AnyStr) -> bool
sql_result = self.select('SELECT flag FROM flags WHERE flag = ?', [flag_name])
@ -415,7 +411,7 @@ class DBConnection(object):
logger.load_log('Upgrading %s' % self.filename, to_log, log_level)
def sanityCheckDatabase(connection, sanity_check):
def sanity_check_db(connection, sanity_check):
sanity_check(connection).check()
@ -427,36 +423,36 @@ class DBSanityCheck(object):
pass
def upgradeDatabase(connection, schema):
def upgrade_database(connection, schema):
logger.log(u'Checking database structure...', logger.MESSAGE)
connection.is_upgrading = False
connection.new_db = 0 == connection.checkDBVersion()
_processUpgrade(connection, schema)
connection.new_db = 0 == connection.check_db_version()
_process_upgrade(connection, schema)
if connection.is_upgrading:
connection.upgrade_log('Finished')
def prettyName(class_name):
def _pretty_name(class_name):
# type: (AnyStr) -> AnyStr
return ' '.join([x.group() for x in re.finditer('([A-Z])([a-z0-9]+)', class_name)])
def restoreDatabase(filename, version):
def _restore_database(filename, version):
logger.log(u'Restoring database before trying upgrade again')
if not sickgear.helpers.restore_versioned_file(dbFilename(filename=filename, suffix='v%s' % version), version):
if not sickgear.helpers.restore_versioned_file(db_filename(filename=filename, suffix='v%s' % version), version):
logger.log_error_and_exit(u'Database restore failed, abort upgrading database')
return False
return True
def _processUpgrade(connection, upgrade_class):
def _process_upgrade(connection, upgrade_class):
instance = upgrade_class(connection)
logger.log('Checking %s database upgrade' % prettyName(upgrade_class.__name__), logger.DEBUG)
logger.log('Checking %s database upgrade' % _pretty_name(upgrade_class.__name__), logger.DEBUG)
if not instance.test():
connection.is_upgrading = True
connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or prettyName(upgrade_class.__name__))
logger.log('Database upgrade required: %s' % prettyName(upgrade_class.__name__), logger.MESSAGE)
db_version = connection.checkDBVersion()
connection.upgrade_log(getattr(upgrade_class, 'pretty_name', None) or _pretty_name(upgrade_class.__name__))
logger.log('Database upgrade required: %s' % _pretty_name(upgrade_class.__name__), logger.MESSAGE)
db_version = connection.check_db_version()
try:
# only do backup if it's not a new db
0 < db_version and backup_database(connection, connection.filename, db_version)
@ -468,7 +464,7 @@ def _processUpgrade(connection, upgrade_class):
# close db before attempting restore
connection.close()
if restoreDatabase(connection.filename, db_version):
if _restore_database(connection.filename, db_version):
logger.log_error_and_exit('Successfully restored database version: %s' % db_version)
else:
logger.log_error_and_exit('Failed to restore database version: %s' % db_version)
@ -480,7 +476,7 @@ def _processUpgrade(connection, upgrade_class):
logger.log('%s upgrade not required' % upgrade_class.__name__, logger.DEBUG)
for upgradeSubClass in upgrade_class.__subclasses__():
_processUpgrade(connection, upgradeSubClass)
_process_upgrade(connection, upgradeSubClass)
# Base migration class. All future DB changes should be subclassed from this class
@ -488,11 +484,11 @@ class SchemaUpgrade(object):
def __init__(self, connection, **kwargs):
self.connection = connection
def hasTable(self, table_name):
def has_table(self, table_name):
return 0 < len(self.connection.select('SELECT 1 FROM sqlite_master WHERE name = ?;', (table_name,)))
def hasColumn(self, table_name, column):
return column in self.connection.tableInfo(table_name)
def has_column(self, table_name, column):
return column in self.connection.table_info(table_name)
def list_tables(self):
# type: (...) -> List[AnyStr]
@ -511,13 +507,13 @@ class SchemaUpgrade(object):
['index'])]
# noinspection SqlResolve
def addColumn(self, table, column, data_type='NUMERIC', default=0, set_default=False):
def add_column(self, table, column, data_type='NUMERIC', default=0, set_default=False):
self.connection.action('ALTER TABLE [%s] ADD %s %s%s' %
(table, column, data_type, ('', ' DEFAULT "%s"' % default)[set_default]))
self.connection.action('UPDATE [%s] SET %s = ?' % (table, column), (default,))
# noinspection SqlResolve
def addColumns(self, table, column_list=None):
def add_columns(self, table, column_list=None):
# type: (AnyStr, List) -> None
if isinstance(column_list, list):
sql = []
@ -535,25 +531,21 @@ class SchemaUpgrade(object):
if sql:
self.connection.mass_action(sql)
def dropColumn(self, table, columns):
# type: (AnyStr, AnyStr) -> None
self.drop_columns(table, columns)
def drop_columns(self, table, column):
# type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> None
# get old table columns and store the ones we want to keep
result = self.connection.select('pragma table_info([%s])' % table)
columns_list = ([column], column)[isinstance(column, list)]
keptColumns = list(filter(lambda col: col['name'] not in columns_list, result))
kept_columns = list(filter(lambda col: col['name'] not in columns_list, result))
keptColumnsNames = []
kept_columns_names = []
final = []
pk = []
# copy the old table schema, column by column
for column in keptColumns:
for column in kept_columns:
keptColumnsNames.append(column['name'])
kept_columns_names.append(column['name'])
cl = [column['name'], column['type']]
@ -574,7 +566,7 @@ class SchemaUpgrade(object):
# join all the table column creation fields
final = ', '.join(final)
keptColumnsNames = ', '.join(keptColumnsNames)
kept_columns_names = ', '.join(kept_columns_names)
# generate sql for the new table creation
if 0 == len(pk):
@ -586,12 +578,12 @@ class SchemaUpgrade(object):
# create new temporary table and copy the old table data across, barring the removed column
self.connection.action(sql)
# noinspection SqlResolve
self.connection.action('INSERT INTO [%s_new] SELECT %s FROM [%s]' % (table, keptColumnsNames, table))
self.connection.action('INSERT INTO [%s_new] SELECT %s FROM [%s]' % (table, kept_columns_names, table))
# copy the old indexes from the old table
result = self.connection.select("SELECT sql FROM sqlite_master WHERE tbl_name=? AND type='index'", [table])
# remove the old table and rename the new table to take it's place
# remove the old table and rename the new table to take its place
# noinspection SqlResolve
self.connection.action('DROP TABLE [%s]' % table)
# noinspection SqlResolve
@ -605,22 +597,19 @@ class SchemaUpgrade(object):
# vacuum the db as we will have a lot of space to reclaim after dropping tables
self.connection.action('VACUUM')
def checkDBVersion(self):
return self.connection.checkDBVersion()
def call_check_db_version(self):
return self.connection.check_db_version()
def incDBVersion(self):
new_version = self.checkDBVersion() + 1
def inc_db_version(self):
new_version = self.call_check_db_version() + 1
# noinspection SqlConstantCondition
self.connection.action('UPDATE db_version SET db_version = ? WHERE 1=1', [new_version])
return new_version
def setDBVersion(self, new_version, check_db_version=True):
def set_db_version(self, new_version, check_db_version=True):
# noinspection SqlConstantCondition
self.connection.action('UPDATE db_version SET db_version = ? WHERE 1=1', [new_version])
return check_db_version and self.checkDBVersion()
def listTables(self):
return self.list_tables()
return check_db_version and self.call_check_db_version()
def do_query(self, queries):
if not isinstance(queries, list):
@ -630,23 +619,23 @@ class SchemaUpgrade(object):
for query in queries:
tbl_name = re.findall(r'(?i)DROP.*?TABLE.*?\[?([^\s\]]+)', query)
if tbl_name and not self.hasTable(tbl_name[0]):
if tbl_name and not self.has_table(tbl_name[0]):
continue
tbl_name = re.findall(r'(?i)CREATE.*?TABLE.*?\s([^\s(]+)\s*\(', query)
if tbl_name and self.hasTable(tbl_name[0]):
if tbl_name and self.has_table(tbl_name[0]):
continue
self.connection.action(query)
def finish(self, tbl_dropped=False):
if tbl_dropped:
self.connection.action('VACUUM')
self.incDBVersion()
self.inc_db_version()
def upgrade_log(self, *args, **kwargs):
self.connection.upgrade_log(*args, **kwargs)
def MigrationCode(my_db):
def migration_code(my_db):
schema = {
0: sickgear.mainDB.InitialSchema,
9: sickgear.mainDB.AddSizeAndSceneNameFields,
@ -719,7 +708,7 @@ def MigrationCode(my_db):
# 20002: sickgear.mainDB.AddCoolSickGearFeature3,
}
db_version = my_db.checkDBVersion()
db_version = my_db.check_db_version()
my_db.new_db = 0 == db_version
logger.log(u'Detected database version: v%s' % db_version, logger.DEBUG)
@ -746,7 +735,7 @@ def MigrationCode(my_db):
my_db.close()
logger.log(u'Failed to update database with error: %s attempting recovery...' % ex(e), logger.ERROR)
if restoreDatabase(my_db.filename, db_version):
if _restore_database(my_db.filename, db_version):
# initialize the main SB database
logger.log_error_and_exit(u'Successfully restored database version: %s' % db_version)
else:
@ -777,7 +766,7 @@ def backup_database(db_connection, filename, version):
return
logger.log(u'Backing up database before upgrade')
if not sickgear.helpers.backup_versioned_file(dbFilename(filename), version):
if not sickgear.helpers.backup_versioned_file(db_filename(filename), version):
logger.log_error_and_exit(u'Database backup failed, abort upgrading database')
else:
logger.log(u'Proceeding with upgrade')
@ -841,7 +830,7 @@ def backup_all_dbs(target, compress=True, prefer_7z=True):
optional compress with zip or 7z (python 3 only, external lib py7zr required)
7z falls back to zip if py7zr is not available
:param target: target folder to backup to
:param target: target folder for backup db
:param compress: compress db backups
:param prefer_7z: prefer 7z compression if available
:return: success, message

View file

@ -193,7 +193,7 @@ class ProviderFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
if my_db.hasTable('provider_fails'):
if my_db.has_table('provider_fails'):
results = my_db.select('SELECT * FROM provider_fails WHERE prov_name = ?', [self.provider_name()])
self._fails = []
for r in results:
@ -210,7 +210,7 @@ class ProviderFailList(object):
with self.lock:
try:
my_db = db.DBConnection('cache.db')
if my_db.hasTable('provider_fails'):
if my_db.has_table('provider_fails'):
# noinspection PyCallByClass,PyTypeChecker
time_limit = int(timestamp_near(datetime.datetime.now() - datetime.timedelta(days=28)))
my_db.action('DELETE FROM provider_fails WHERE fail_time < ?', [time_limit])
@ -281,7 +281,7 @@ class GenericProvider(object):
def _load_fail_values(self):
if hasattr(sickgear, 'DATA_DIR'):
my_db = db.DBConnection('cache.db')
if my_db.hasTable('provider_fails_count'):
if my_db.has_table('provider_fails_count'):
r = my_db.select('SELECT * FROM provider_fails_count WHERE prov_name = ?', [self.get_id()])
if r:
self._failure_count = helpers.try_int(r[0]['failure_count'], 0)
@ -302,7 +302,7 @@ class GenericProvider(object):
def _save_fail_value(self, field, value):
my_db = db.DBConnection('cache.db')
if my_db.hasTable('provider_fails_count'):
if my_db.has_table('provider_fails_count'):
r = my_db.action('UPDATE provider_fails_count SET %s = ? WHERE prov_name = ?' % field,
[value, self.get_id()])
if 0 == r.rowcount:

View file

@ -42,7 +42,7 @@ class CacheDBConnection(db.DBConnection):
# Create the table if it's not already there
try:
if not self.hasTable('lastUpdate'):
if not self.has_table('lastUpdate'):
self.action('CREATE TABLE lastUpdate (provider TEXT, time NUMERIC)')
except (BaseException, Exception) as e:
if ex(e) != 'table lastUpdate already exists':

View file

@ -48,7 +48,7 @@ class MigrationBasicTests(test.SickbeardTestDBCase):
update.execute()
sleep(0.1)
db.MigrationCode(my_db)
db.migration_code(my_db)
my_db.close()
# force python to garbage collect all db connections, so that the file can be deleted
@ -67,9 +67,9 @@ class MigrationBasicTests(test.SickbeardTestDBCase):
# 0 -> 31
class OldInitialSchema(db.SchemaUpgrade):
def execute(self):
db.backup_database(self.connection, 'sickbeard.db', self.checkDBVersion())
db.backup_database(self.connection, 'sickbeard.db', self.call_check_db_version())
if not self.hasTable('tv_shows') and not self.hasTable('db_version'):
if not self.has_table('tv_shows') and not self.has_table('db_version'):
queries = [
'CREATE TABLE db_version (db_version INTEGER);',
'CREATE TABLE history ('
@ -105,7 +105,7 @@ class OldInitialSchema(db.SchemaUpgrade):
self.connection.action(query)
else:
cur_db_version = self.checkDBVersion()
cur_db_version = self.call_check_db_version()
if cur_db_version < MIN_DB_VERSION:
logger.log_error_and_exit(
@ -127,13 +127,13 @@ class OldInitialSchema(db.SchemaUpgrade):
' your database may be unusable due to their modifications.'
)
return self.checkDBVersion()
return self.call_check_db_version()
class AddDefaultEpStatusToTvShows(db.SchemaUpgrade):
def execute(self):
self.addColumn('tv_shows', 'default_ep_status', 'TEXT', '')
self.setDBVersion(41, check_db_version=False)
self.add_column('tv_shows', 'default_ep_status', 'TEXT', '')
self.set_db_version(41, check_db_version=False)
if '__main__' == __name__:

View file

@ -195,16 +195,16 @@ def setup_test_db():
"""upgrades the db to the latest version
"""
# upgrading the db
db.MigrationCode(db.DBConnection())
db.migration_code(db.DBConnection())
# fix up any db problems
db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck)
db.sanity_check_db(db.DBConnection(), mainDB.MainSanityCheck)
# and for cachedb too
db.upgradeDatabase(db.DBConnection('cache.db'), cache_db.InitialSchema)
db.upgrade_database(db.DBConnection('cache.db'), cache_db.InitialSchema)
# and for faileddb too
db.upgradeDatabase(db.DBConnection('failed.db'), failed_db.InitialSchema)
db.upgrade_database(db.DBConnection('failed.db'), failed_db.InitialSchema)
def teardown_test_db():