Merge branch 'release/3.28.0'

JackDandy 2023-04-12 13:08:11 +01:00
commit f92f9f8014
309 changed files with 11198 additions and 26383 deletions

View file

@@ -1,4 +1,24 @@
-### 3.27.13 (2023-04-12 10:15:00 UTC)
+### 3.28.0 (2023-04-12 13:05:00 UTC)
+
+* Update html5lib 1.1 (f87487a) to 1.2-dev (3e500bb)
+* Update package resource API 63.2.0 (3ae44cd) to 67.5.1 (f51eccd)
+* Update Tornado Web Server 6.2.0 (a4f08a3) to 6.3.0 (7186b86)
+* Update urllib3 1.26.13 (25fbd5f) to 1.26.14 (a06c05c)
+* Change remove calls to legacy py2 fix encoding function
+* Change requirements for pure py3
+* Change codebase cleanups
+* Change improve perf by using generators with `any`
+* Change deprecate processEpisode used by nzbToMedia to advise how to configure API instead
+* Change optionally add disk free space in response to three Web API endpoints
+* Change increase API version number to 15
+* Add actually use mount points to get disk free space
+* Add optional "freespace" parameter to endpoints: sg.getrootdirs, sg.addrootdir, sg.deleterootdir
+* Change update help of affected endpoints
+* Fix explicitly save rootdirs after adding or deleting via Web API
+* Change add Rarbg UHD search category
+
+### 3.27.13 (2023-04-12 10:15:00 UTC)
 * Change fix show id log output
 * Change handle exceptions thrown from pkg_resources parsing newly extended working set modules not even used by SG
@@ -771,7 +791,7 @@
 * Change allow Python 3.8.10 and 3.9.5
 * Remove PiSexy provider
-* Fix refreshShow, prevent another refresh of show if already in queue and not forced
+* Fix refresh_show, prevent another refresh of show if already in queue and not forced
 * Fix webapi set scene season
 * Fix set path in all_tests for py2
 * Fix webapi exception if no backlog was done before (CMD_SickGearCheckScheduler)
@@ -1103,7 +1123,7 @@
 * Add API response field `global exclude require` to sg.listrequirewords endpoint
 * Change improve Popen resource usage under py2
 * Add overall failure monitoring to History/Connect fails (renamed from "Provider fails")
-* Change log exception during updateCache in newznab
+* Change log exception during update_cache in newznab
 * Change make Py3.9 preparations
 * Change anime "Available groups" to display "No groups listed..." when API is fine with no results instead of blank
 * Change improve clarity of anime group lists by using terms Allow list and Block list
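A note on the entry "Change improve perf by using generators with `any`": it refers to swaps like the layout-template hunk later in this commit, where `any([x for ...])` becomes `any(x for ...)`. A minimal sketch of the difference; the variable values here are illustrative only:

```python
# With a list comprehension, every element is evaluated and stored
# before any() runs; with a generator expression, any() stops at the
# first truthy element and nothing is materialised.
body_attr = 'class="back-art"'  # illustrative value

eager = any([x in body_attr for x in ['back-art', 'pro', 'ii']])  # builds the full list first
lazy = any(x in body_attr for x in ['back-art', 'pro', 'ii'])     # short-circuits on 'back-art'

assert eager == lazy
```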

View file

@@ -37,6 +37,9 @@ if old_magic != magic_number:
 # skip cleaned005 as used during dev by testers
 cleanups = [
+    ['.cleaned009.tmp', r'lib\scandir', [
+        r'lib\scandir\__pycache__', r'lib\scandir',
+    ]],
     ['.cleaned008.tmp', r'lib\tornado_py3', [
         r'lib\bs4_py2\builder\__pycache__', r'lib\bs4_py2\builder', r'lib\bs4_py2',
         r'lib\bs4_py3\builder\__pycache__', r'lib\bs4_py3\builder', r'lib\bs4_py3',
@@ -122,7 +125,7 @@ for cleaned_path, test_path, dir_list in cleanups:
             pass
     with io.open(cleaned_file, 'w+', encoding='utf-8') as fp:
-        fp.write(u'This file exists to prevent a rerun delete of *.pyc, *.pyo files')
+        fp.write('This file exists to prevent a rerun delete of *.pyc, *.pyo files')
         fp.flush()
         os.fsync(fp.fileno())
@@ -163,10 +166,10 @@ if not os.path.isfile(cleaned_file) or os.path.exists(test):
     swap_name = cleaned_file
     cleaned_file = danger_output
     danger_output = swap_name
-    msg = u'Failed (permissions?) to delete file(s). You must manually delete:\r\n%s' % '\r\n'.join(bad_files)
+    msg = 'Failed (permissions?) to delete file(s). You must manually delete:\r\n%s' % '\r\n'.join(bad_files)
     print(msg)
 else:
-    msg = u'This file exists to prevent a rerun delete of dead lib/html5lib files'
+    msg = 'This file exists to prevent a rerun delete of dead lib/html5lib files'
     with io.open(cleaned_file, 'w+', encoding='utf-8') as fp:
         fp.write(msg)
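The hunks above follow the cleaner's sentinel-file pattern: perform a one-off cleanup, then write a marker file so later runs skip it. A minimal sketch of that pattern, with an illustrative marker name borrowed from the list above:

```python
import io
import os

CLEANED_MARKER = '.cleaned009.tmp'  # illustrative name, mirrors the entries above

if not os.path.isfile(CLEANED_MARKER):
    # ... perform the one-off cleanup here ...
    with io.open(CLEANED_MARKER, 'w+', encoding='utf-8') as fp:
        fp.write('This file exists to prevent a rerun delete of *.pyc, *.pyo files')
        fp.flush()
        os.fsync(fp.fileno())  # make sure the sentinel survives a crash
```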

View file

@@ -70,11 +70,11 @@ addList("Command", "Help", "?cmd=help", "sg.functions-list", "","", "default");
 addOption("sg.functions-list", "$k", "&subject=$k", "", "", "#echo ('sb', 'sg')['sg' in $k]#")
 #end for
 addList("Command", "SickBeard.AddRootDir", "?cmd=sb.addrootdir", "sb.addrootdir");
-addList("Command", "SickGear.AddRootDir", "?cmd=sg.addrootdir", "sb.addrootdir");
+addList("Command", "SickGear.AddRootDir", "?cmd=sg.addrootdir", "sg.addrootdir");
 addOption("Command", "SickBeard.CheckScheduler", "?cmd=sb.checkscheduler");
 addOption("Command", "SickGear.CheckScheduler", "?cmd=sg.checkscheduler");
 addList("Command", "SickBeard.DeleteRootDir", "?cmd=sb.deleterootdir", "sb.deleterootdir");
-addList("Command", "SickGear.DeleteRootDir", "?cmd=sg.deleterootdir", "sb.deleterootdir");
+addList("Command", "SickGear.DeleteRootDir", "?cmd=sg.deleterootdir", "sg.deleterootdir");
 addOption("Command", "SickBeard.ForceSearch", "?cmd=sb.forcesearch");
 addList("Command", "SickGear.ForceSearch", "?cmd=sg.forcesearch", "sg.forcesearch");
 addOption("Command", "SickGear.SearchQueue", "?cmd=sg.searchqueue");
@@ -88,7 +88,7 @@ addList("Command", "SickGear.GetIndexers", "?cmd=sg.getindexers", "listindexers"
 addList("Command", "SickGear.GetIndexerIcon", "?cmd=sg.getindexericon", "getindexericon");
 addList("Command", "SickGear.GetNetworkIcon", "?cmd=sg.getnetworkicon", "getnetworkicon");
 addOption("Command", "SickBeard.GetRootDirs", "?cmd=sb.getrootdirs");
-addOption("Command", "SickGear.GetRootDirs", "?cmd=sg.getrootdirs");
+addList("Command", "SickGear.GetRootDirs", "?cmd=sg.getrootdirs", "sg.addfreespace");
 addList("Command", "SickBeard.PauseBacklog", "?cmd=sb.pausebacklog", "sb.pausebacklog");
 addList("Command", "SickGear.PauseBacklog", "?cmd=sg.pausebacklog", "sb.pausebacklog");
 addOption("Command", "SickBeard.Ping", "?cmd=sb.ping");
@@ -621,10 +621,26 @@ addOption("sb.addrootdir-opt", "Optional Param", "", 1);
 addOption("sb.addrootdir-opt", "Default", "&default=1");
 addOption("sb.addrootdir-opt", "Not Default", "&default=0");
-addOption("sb.deleterootdir", "C:\\Temp", "&location=C:\\Temp", "", 1);
+addList("sg.addrootdir", "C:\\Temp", "&location=C:\\Temp", "sg.addrootdir-opt");
+addList("sg.addrootdir", "/usr/bin", "&location=/usr/bin/", "sg.addrootdir-opt");
+addList("sg.addrootdir", "S:\\Invalid_Location", "&location=S:\\Invalid_Location", "sg.addrootdir-opt");
+addList("sg.addrootdir-opt", "Optional Param", "", "sg.addfreespace");
+addList("sg.addrootdir-opt", "Default", "&default=1", "sg.addfreespace");
+addList("sg.addrootdir-opt", "Not Default", "&default=0", "sg.addfreespace");
+addOption("sb.deleterootdir", "C:\\Temp", "&location=C:\\Temp", 1);
 addOption("sb.deleterootdir", "/usr/bin", "&location=/usr/bin/");
 addOption("sb.deleterootdir", "S:\\Invalid_Location", "&location=S:\\Invalid_Location");
+addList("sg.deleterootdir", "C:\\Temp", "&location=C:\\Temp", "sg.addfreespace");
+addList("sg.deleterootdir", "/usr/bin", "&location=/usr/bin/", "sg.addfreespace");
+addList("sg.deleterootdir", "S:\\Invalid_Location", "&location=S:\\Invalid_Location", "sg.addfreespace");
+addOption("sg.addfreespace", "Optional Param", "", 1)
+addOption("sg.addfreespace", "incl Freespace", "&freespace=1")
+addOption("sg.addfreespace", "excl Freespace", "&freespace=0")
 #for $cur_show_obj in $sortedShowList:
 addList("show.pause", "$cur_show_obj.name", "&indexerid=$cur_show_obj.prodid", "show.pause-opt");
 #end for
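These builder entries wire the new optional `freespace` parameter from the changelog into the API sandbox for sg.getrootdirs, sg.addrootdir and sg.deleterootdir. A hedged usage sketch; the `/api/<apikey>/` path is the usual SickBeard-style endpoint shape, and host, port and key are placeholders, not values from this commit:

```python
import json
from urllib.request import urlopen

# Placeholders: adjust host, port and API key for a real install.
base = 'http://localhost:8081/api/<apikey>/'

# Ask sg.getrootdirs to include disk free space per mount point (freespace=1).
with urlopen(base + '?cmd=sg.getrootdirs&freespace=1') as resp:
    print(json.load(resp))
```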

View file

@@ -65,7 +65,7 @@
 <tbody>
 #for $hItem in $cacheResults:
-#set $provider = $providers.getProviderClass($hItem['provider'])
+#set $provider = $providers.get_by_id($hItem['provider'])
 #set $tip = '%s @ %s' % ($hItem['provider'], $SGDatetime.sbfdatetime($SGDatetime.fromtimestamp($hItem['time'])))
 #set $ver = $hItem['version']
 #set $ver = ($ver, '')[-1 == $ver]

View file

@@ -182,7 +182,11 @@ def param(visible=True, rid=None, cache_person=None, cache_char=None, person=Non
 #end if
 #set $section_links = False
+#set $all_sources = $TVInfoAPI().all_sources
 #for $cur_src, $cur_sid in sorted(iteritems($person.ids))
+#if $cur_src not in $all_sources:
+#continue
+#end if
 #if $TVInfoAPI($cur_src).config.get('people_url')
 #if not $section_links
 #set $section_links = True

View file

@@ -29,7 +29,7 @@
 </td>
 </tr>
 <tr><td class="infoTableHeader">Config file:</td><td class="infoTableCell">$sg_str('CONFIG_FILE')</td></tr>
-<tr><td class="infoTableHeader">Database file:</td><td class="infoTableCell">$db.dbFilename()</td></tr>
+<tr><td class="infoTableHeader">Database file:</td><td class="infoTableCell">$db.db_filename()</td></tr>
 #if $db.db_supports_backup
 <tr><td class="infoTableHeader">Database backups:</td><td class="infoTableCell">$backup_db_path</td></tr>
 #end if

View file

@@ -13,7 +13,6 @@
 #from sickgear.sgdatetime import *
 <% def sg_var(varname, default=False): return getattr(sickgear, varname, default) %>#slurp#
 <% def sg_str(varname, default=''): return getattr(sickgear, varname, default) %>#slurp#
-#from _23 import list_keys
 ##
 #set global $title = 'Config - General'
 #set global $header = 'General Settings'
@@ -846,7 +845,7 @@
 <span class="component-title">File logging level:</span>
 <span class="component-desc">
 <select id="file_logging_presets" name="file_logging_preset" class="form-control input-sm">
-#set $levels = $list_keys(file_logging_presets)
+#set $levels = $list(file_logging_presets)
 #set void = $levels.sort(key=lambda x: $file_logging_presets[$x])
 #set $level_count = len($levels)
 #for $level in $levels

View file

@@ -36,12 +36,12 @@
 <!--
 \$(document).ready(function(){
 #if $sickgear.USE_NZBS
-#for $cur_newznab_provider in $sickgear.newznabProviderList:
+#for $cur_newznab_provider in $sickgear.newznab_providers:
 \$(this).addProvider('$cur_newznab_provider.get_id()', '$cur_newznab_provider.name', '$cur_newznab_provider.url', '<%= starify(cur_newznab_provider.key) %>', '$cur_newznab_provider.cat_ids', $int($cur_newznab_provider.default), !0);
 #end for
 #end if
 #if $sickgear.USE_TORRENTS
-#for $cur_torrent_rss_provider in $sickgear.torrentRssProviderList:
+#for $cur_torrent_rss_provider in $sickgear.torrent_rss_providers:
 \$(this).addTorrentRssProvider('$cur_torrent_rss_provider.get_id()', '$cur_torrent_rss_provider.name', '$cur_torrent_rss_provider.url', '<%= starify(cur_torrent_rss_provider.cookies) %>');
 #end for
 #end if
@@ -101,7 +101,7 @@
 <ul id="provider_order_list" class="provider_order_panel">
-#for $cur_provider in [$x for $x in $sickgear.providers.sortedProviderList()
+#for $cur_provider in [$x for $x in $sickgear.providers.sorted_sources()
 if $x.providerType == $GenericProvider.NZB and $sickgear.USE_NZBS or
 $x.providerType == $GenericProvider.TORRENT and $sickgear.USE_TORRENTS]
 #set $cur_name = $cur_provider.get_id()
@@ -129,7 +129,7 @@
 #end for
 </ul>
-<input type="hidden" name="provider_order" id="provider_order" value="<%=' '.join([x.get_id()+':'+str(int(x.is_enabled())) for x in sickgear.providers.sortedProviderList()])%>"/>
+<input type="hidden" name="provider_order" id="provider_order" value="<%=' '.join([x.get_id()+':'+str(int(x.is_enabled())) for x in sickgear.providers.sorted_sources()])%>"/>
 #if $sickgear.USE_NZBS or $sickgear.USE_TORRENTS
 <div id="provider_key">
 <span style="float:left;font-size:10px;vertical-align:top;font-weight:normal">(PA)</span><p class="note">Public access, no account required</p>
@@ -168,7 +168,7 @@
 <span class="component-desc">
 #set $provider_config_list_enabled = []
 #set $provider_config_list = []
-#for $cur_provider in [$x for $x in $sickgear.providers.sortedProviderList()
+#for $cur_provider in [$x for $x in $sickgear.providers.sorted_sources()
 if $x.providerType == $GenericProvider.NZB and $sickgear.USE_NZBS or
 $x.providerType == $GenericProvider.TORRENT and $sickgear.USE_TORRENTS]
 #if $cur_provider.is_enabled()
@@ -213,7 +213,7 @@
 #set $filter_scene_rej_nuked_desc = 'not scene nuked'
 #set $filter_scene_nuked_active_desc = 'nuked if no active search results'
 #set $filter_tip = 'nothing selected allows everything (i.e. no filtering, default)'
-#for $cur_newznab_provider in [$cur_provider for $cur_provider in $sickgear.newznabProviderList]
+#for $cur_newznab_provider in [$cur_provider for $cur_provider in $sickgear.newznab_providers]
 <div class="providerDiv" id="${cur_newznab_provider.get_id()}Div">
 #set $can_recent = $hasattr($cur_newznab_provider, 'enable_recentsearch')
 #set $can_backlog = $hasattr($cur_newznab_provider, 'enable_backlog')
@@ -345,8 +345,8 @@
 ##
 ##
-#for $cur_nzb_provider in [$cur_provider for $cur_provider in $sickgear.providers.sortedProviderList()
-if $cur_provider.providerType == $GenericProvider.NZB and $cur_provider not in $sickgear.newznabProviderList]:
+#for $cur_nzb_provider in [$cur_provider for $cur_provider in $sickgear.providers.sorted_sources()
+if $cur_provider.providerType == $GenericProvider.NZB and $cur_provider not in $sickgear.newznab_providers]:
 <div class="providerDiv" id="${cur_nzb_provider.get_id()}Div">
 #set $can_recent = $hasattr($cur_nzb_provider, 'enable_recentsearch')
 #set $can_backlog = $hasattr($cur_nzb_provider, 'enable_backlog')
@@ -488,7 +488,7 @@
 ##
 ##
-#for $cur_torrent_provider in $sickgear.USE_TORRENTS and [$cur_provider for $cur_provider in $sickgear.providers.sortedProviderList()
+#for $cur_torrent_provider in $sickgear.USE_TORRENTS and [$cur_provider for $cur_provider in $sickgear.providers.sorted_sources()
 if $cur_provider.providerType == $GenericProvider.TORRENT] or []:
 <div class="providerDiv" id="${cur_torrent_provider.get_id()}Div">
 #if callable(getattr(cur_torrent_provider, 'ui_string', None))

View file

@@ -319,7 +319,7 @@
 </div>
 #end if
-#set $anyQualities, $bestQualities = $Quality.splitQuality(int($show_obj.quality))
+#set $anyQualities, $bestQualities = $Quality.split_quality(int($show_obj.quality))
 #if $show_obj.quality in $qualityPresets
 <div>
 <span class="details-title">Quality</span>

View file

@@ -202,7 +202,7 @@
 <div class="field-pair">
-#set $qualities = $common.Quality.splitQuality(int($show_obj.quality))
+#set $qualities = $common.Quality.split_quality(int($show_obj.quality))
 #set global $any_qualities = $qualities[0]
 #set global $best_qualities = $qualities[1]
 #include $os.path.join($sg_str('PROG_DIR'), 'gui/slick/interfaces/default/inc_qualityChooser.tmpl')

View file

@@ -133,7 +133,7 @@
 <tbody>
 #for $hItem in $history_results
-#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($hItem['action']))
+#set $curStatus, $curQuality = $Quality.split_composite_status(int($hItem['action']))
 #set $display_name = '<span data-sort="%s">%s - S%02iE%02i</span>' % (
 $hItem['data_name'],
 (('<span class="article">%s</span> %s' % ($hItem['name1'], $hItem['name2'])), $hItem['show_name'])[$sg_var('SORT_ARTICLE') or not $hItem['name1']],
@@ -141,7 +141,7 @@
 <tr>
 #set $curdatetime = $datetime.datetime.strptime(str($hItem['date']), $history.dateFormat)
 <td><div class="${fuzzydate}" data-sort="$time.mktime($curdatetime.timetuple())">$SGDatetime.sbfdatetime($curdatetime, show_seconds=True)</div></td>
-<td class="tvShow"><a href="$sbRoot/home/view-show?tvid_prodid=$hItem['tvid_prodid']#season-$hItem['season']">$display_name#if $Quality.splitCompositeStatus($hItem['action'])[0] == $SNATCHED_PROPER then ' <span class="quality Proper">Proper</span>' else ''#</a></td>
+<td class="tvShow"><a href="$sbRoot/home/view-show?tvid_prodid=$hItem['tvid_prodid']#season-$hItem['season']">$display_name#if $Quality.split_composite_status($hItem['action'])[0] == $SNATCHED_PROPER then ' <span class="quality Proper">Proper</span>' else ''#</a></td>
 <td#echo ('', ' class="subtitles_column"')[$SUBTITLED == $curStatus]#>
 #if $SUBTITLED == $curStatus
 <img width="16" height="11" src="$sbRoot/images/flags/<%= hItem["resource"][len(hItem["resource"])-6:len(hItem["resource"])-4] + '.png' %>">
@@ -156,7 +156,7 @@
 #else
 #if '-1' != $hItem['provider'] and len($hItem['provider'])
 #if $curStatus in $SNATCHED_ANY + [$FAILED]
-#set $provider = $providers.getProviderClass($generic.GenericProvider.make_id($hItem['provider']))
+#set $provider = $providers.get_by_id($generic.GenericProvider.make_id($hItem['provider']))
 #if None is not $provider
 <img src="$sbRoot/images/providers/<%= provider.image_name() %>" width="16" height="16" /><span>$provider.name</span>
 #else
@@ -207,10 +207,10 @@
 #set $order = 1
 #set $ordinal_indicators = {'1':'st', '2':'nd', '3':'rd'}
 #for $action in reversed($hItem['actions'])
-#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($action['action']))
+#set $curStatus, $curQuality = $Quality.split_composite_status(int($action['action']))
 #set $basename = $os.path.basename($action['resource'])
 #if $curStatus in $SNATCHED_ANY + [$FAILED]
-#set $provider = $providers.getProviderClass($generic.GenericProvider.make_id($action['provider']))
+#set $provider = $providers.get_by_id($generic.GenericProvider.make_id($action['provider']))
 #if None is not $provider
 #set $prov_list += ['<span%s><img class="help" src="%s/images/providers/%s" width="16" height="16" alt="%s" title="%s.. %s: %s" /></span>'\
 % (('', ' class="fail"')[$FAILED == $curStatus], $sbRoot, $provider.image_name(), $provider.name,
@@ -262,7 +262,7 @@
 #if $sg_var('USE_SUBTITLES')
 <td>
 #for $action in reversed($hItem['actions'])
-#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($action['action']))
+#set $curStatus, $curQuality = $Quality.split_composite_status(int($action['action']))
 #if $SUBTITLED == $curStatus
 <img src="$sbRoot/images/subtitles/<%= action['provider'] + '.png' %>" width="16" height="16" alt="$action['provider']" title="<%= action['provider'].capitalize() %>:$os.path.basename($action['resource'])" />
 <span> / </span>
@@ -575,7 +575,7 @@
 #for $hItem in $stat_results
 <tr>
 <td class="provider text-nowrap">
-#set $provider = $providers.getProviderClass($generic.GenericProvider.make_id($hItem['provider']))
+#set $provider = $providers.get_by_id($generic.GenericProvider.make_id($hItem['provider']))
 #if None is not $provider
 <img src="$sbRoot/images/providers/<%= provider.image_name() %>" width="16" height="16"><span data-sort="$hItem['provider']">$provider.name</span>
 #else
@@ -628,7 +628,7 @@
 </thead>
 #set global $row = 0
 <tbody>
-#for $cur_provider in $sorted($sickgear.newznabProviderList, key=lambda x: x.last_recent_search or SGDatetime(2000,1,1), reverse=True)
+#for $cur_provider in $sorted($sickgear.newznab_providers, key=lambda x: x.last_recent_search or SGDatetime(2000,1,1), reverse=True)
 #set $last_rls_date = '-'
 #set $last_rls_age = None
 #set $last_rls_age_str = '-'

View file

@@ -99,7 +99,7 @@
 <div id="$cur_list_id" class="clearfix container" style="clear:both#if 'poster' == $layout#;padding:0#end if#">
 <div class="posterview">
 ##
-#for $curLoadingShow in $sickgear.show_queue_scheduler.action.loadingShowList
+#for $curLoadingShow in $sickgear.show_queue_scheduler.action.loading_showlist
 ##
 #if $curLoadingShow.show_obj != None and $curLoadingShow.show_obj in $sg_str('showList')
 #continue
@@ -292,7 +292,7 @@
 </tfoot>
 <tbody>
-#for $curLoadingShow in $sickgear.show_queue_scheduler.action.loadingShowList
+#for $curLoadingShow in $sickgear.show_queue_scheduler.action.loading_showlist
 #if $curLoadingShow.show_obj != None and $curLoadingShow.show_obj in $sg_str('showList')
 #continue
 #end if

View file

@@ -18,7 +18,7 @@
 </div>
 <div class="field-pair">
-#set $qualities = $Quality.splitQuality($sg_var('QUALITY_DEFAULT', SD))
+#set $qualities = $Quality.split_quality($sg_var('QUALITY_DEFAULT', SD))
 #set global $any_qualities = $qualities[0]
 #set global $best_qualities = $qualities[1]
 #include $os.path.join($sg_str('PROG_DIR'), 'gui/slick/interfaces/default/inc_qualityChooser.tmpl')

View file

@@ -25,7 +25,7 @@
 #set $ep_str = '%sx%s' % $ep_key
 #set $epLoc = $ep['location']
 #set never_aired = 0 < int($ep['season']) and 1 == int($ep['airdate'])
-<tr class="#echo ' '.join([$Overview.overviewStrings[$ep_cats[$ep_str]], ('', 'airdate-never')[$never_aired], ('', 'archived')[$ARCHIVED == $Quality.splitCompositeStatus(int($ep['status']))[0]]])#">
+<tr class="#echo ' '.join([$Overview.overviewStrings[$ep_cats[$ep_str]], ('', 'airdate-never')[$never_aired], ('', 'archived')[$ARCHIVED == $Quality.split_composite_status(int($ep['status']))[0]]])#">
 <td class="col-checkbox">
 <input type="checkbox" class="epCheck #echo 'hide' if $UNAIRED == int($ep['status']) else ''#" id="$ep_str" name="$ep_str">
 </td>
@@ -99,7 +99,7 @@
 </td>
 #end if
 #slurp
-#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($ep['status']))
+#set $curStatus, $curQuality = $Quality.split_composite_status(int($ep['status']))
 #if Quality.NONE != $curQuality
 <td class="col-status">#if $SUBTITLED == $curStatus#<span class="addQTip" title="$statusStrings[$curStatus]"><i class="sgicon-subtitles" style="vertical-align:middle"></i></span>#else#$statusStrings[$curStatus].replace('Downloaded', '')#end if# #if 'Unknown' != $statusStrings[$curStatus]#<span class="quality $Quality.get_quality_css($curQuality)#if $downloaded# addQTip" title="$downloaded#end if#">$Quality.get_quality_ui($curQuality)</span>#end if#</td>
 #else
@@ -107,7 +107,7 @@
 #end if
 <td class="col-search">
 #if 0 != int($ep['season'])
-#set $status = $Quality.splitCompositeStatus(int($ep['status']))[0]
+#set $status = $Quality.split_composite_status(int($ep['status']))[0]
 #if ($status in $SNATCHED_ANY + [$DOWNLOADED, $ARCHIVED]) and $sg_var('USE_FAILED_DOWNLOADS')
 <a class="ep-retry" href="$sbRoot/home/episode-retry?tvid_prodid=$show_obj.tvid_prodid&amp;season=$ep['season']&amp;episode=$ep['episode']"><img src="$sbRoot/images/search16.png" height="16" alt="retry" title="Retry download"></a>
 #else

View file

@@ -1,12 +1,11 @@
 #import sickgear
 #from sickgear.common import Quality, qualityPresets, qualityPresetStrings
-#from _23 import filter_list
 ##
 #set $html_checked = ' checked="checked"'
 #set $html_selected = ' selected="selected"'
 <div class="field-pair">
 <label for="quality-preset" class="clearfix">
-#set $overall_quality = $Quality.combineQualities($any_qualities, $best_qualities)
+#set $overall_quality = $Quality.combine_qualities($any_qualities, $best_qualities)
 <span class="component-title input">Quality to download</span>
 <span class="component-desc">
 #set $selected = None
@@ -35,7 +34,7 @@
 <span id="wanted-quality" class="component-desc">
 <p>select one or more qualities; the best one found when searching will be used</p>
-#set $any_quality_list = filter_list(lambda x: x > $Quality.NONE and x < $Quality.UNKNOWN, $Quality.qualityStrings)
+#set $any_quality_list = list(filter(lambda x: x > $Quality.NONE and x < $Quality.UNKNOWN, $Quality.qualityStrings))
 #set $has_unknown = False
 #for $cur_quality in sorted($any_quality_list):
 ##set $has_unknown |= ($Quality.UNKNOWN == $cur_quality and $cur_quality in $any_qualities)
@@ -61,7 +60,7 @@
 </div>
 <span id="upgrade-quality" class="component-desc">
 <p>optional, upgrade existing media to any selected quality</p>
-#set $best_quality_list = filter_list(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings)
+#set $best_quality_list = list(filter(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings))
 #for $cur_quality in sorted($best_quality_list):
 <a href="#" data-quality="$cur_quality" class="btn btn-inverse dark-bg#echo ('', ' active')[$cur_quality in $best_qualities]#" role="button"><i class="icon-glyph searchadd"></i>$Quality.get_quality_ui($cur_quality)</a>
 #if $cur_quality in [$Quality.SDDVD, $Quality.FULLHDTV, $Quality.FULLHDBLURAY]
@@ -84,7 +83,7 @@
 <span class="component-desc bfr">
 <div style="float:left;padding-right:28px">
 <h4 class="jumbo">Wanted</h4>
-#set $any_quality_list = filter_list(lambda x: x > $Quality.NONE, $Quality.qualityStrings)
+#set $any_quality_list = list(filter(lambda x: x > $Quality.NONE, $Quality.qualityStrings))
 <select id="wanted-qualities" name="any_qualities" multiple="multiple" size="$len($any_quality_list)" class="form-control form-control-inline input-sm">
 #for $cur_quality in sorted($any_quality_list):
@@ -95,7 +94,7 @@
 <div style="float:left;padding-right:20px">
 <h4 class="jumbo">Upgrade to</h4>
-#set $best_quality_list = filter_list(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings)
+#set $best_quality_list = list(filter(lambda x: x > $Quality.SDTV and x < $Quality.UNKNOWN, $Quality.qualityStrings))
 <select id="upgrade-qualities" name="best_qualities" multiple="multiple" size="$len($best_quality_list)" class="form-control form-control-inline input-sm">
 #for $cur_quality in sorted($best_quality_list):
 <option value="$cur_quality"#echo ('', $html_selected)[$cur_quality in $best_qualities]#>$Quality.get_quality_ui($cur_quality)</option>

View file

@@ -105,7 +105,7 @@
 #except
 #pass
 #end try
-#if not any([x in $body_attr for x in ['back-art', 'pro', 'ii']])
+#if not any(x in $body_attr for x in ['back-art', 'pro', 'ii'])
 #set $parts = $body_attr.split('class="')
 #set $body_attr = ('class="%s '.join($parts), $parts[0] + ' class="%s"')[1 == len($parts)] % {0: '', 1: 'pro', 2: 'pro ii'}.get(getattr($sickgear, 'DISPLAY_SHOW_VIEWMODE', 0))
 #end if
@@ -222,7 +222,7 @@
 #for item in $history_compact
 #if 'tvid_prodid' in $item
 #set $action = $item['actions'][0]
-#set $curStatus, $curQuality = $Quality.splitCompositeStatus(int($action['action']))
+#set $curStatus, $curQuality = $Quality.split_composite_status(int($action['action']))
 #set $status = None
 #if $curStatus in $SNATCHED_ANY + [$FAILED]
 #set $status = 'snatched'
@@ -395,6 +395,13 @@
 </div>
 #end if
 ##
+#if $sickgear.MEMCACHE.get('DEPRECATE_PP_LEGACY')
+<div style="background-color:#a00; margin-top:35px; padding:5px 5px 1px 5px">
+<p>Mar 2020: The `<em><span style="color:#fff">nzbToMedia</span></em>` script began using the <em><span style="color:#fff">secure</span></em> API to process media</p>
+<p>Mar 2023: To remove this red box, please follow this <a target="_blank" href="https://github.com/SickGear/SickGear/wiki/FAQ-nzbToMedia"><span style="color:#fff">guidance</span></a></p>
+</div>
+#end if
+##
 #set $items = []
 #try
 #set void = $items.append($topmenu)

View file

@@ -188,37 +188,37 @@ $xsrf_form_html
 #set $show_size = $max if not $show_loc else $get_size($show_loc)
 #set $option_state = '<input type="checkbox"%s class="%s-check"%s>'
 ##
-#set $curUpdate_disabled = $sickgear.show_queue_scheduler.action.isBeingUpdated($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInUpdateQueue($cur_show_obj)
+#set $curUpdate_disabled = $sickgear.show_queue_scheduler.action.is_being_updated($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_update_queue($cur_show_obj)
 #set $tip = ' title="Update%s"' % ('', $disabled_inprogress_tip)[$curUpdate_disabled]
 #set $curUpdate = ($tip, $option_state % (('', $disabled)[$curUpdate_disabled], 'update', $tip))
 ##
-#set $curRefresh_disabled = $sickgear.show_queue_scheduler.action.isBeingRefreshed($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInRefreshQueue($cur_show_obj)
+#set $curRefresh_disabled = $sickgear.show_queue_scheduler.action.is_being_refreshed($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_refresh_queue($cur_show_obj)
 #set $tip = ' title="Rescan%s"' % ('', $disabled_inprogress_tip)[$curRefresh_disabled]
 #set $curRefresh = ($tip, $option_state % (('', $disabled)[$curRefresh_disabled], 'refresh', $tip))
 ##
-#set $curRename_disabled = $sickgear.show_queue_scheduler.action.isBeingRenamed($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInRenameQueue($cur_show_obj)
+#set $curRename_disabled = $sickgear.show_queue_scheduler.action.is_being_renamed($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_rename_queue($cur_show_obj)
 #set $tip = ' title="Rename%s"' % ('', $disabled_inprogress_tip)[$curRename_disabled]
 #set $curRename = ($tip, $option_state % (('', $disabled)[$curRename_disabled], 'rename', $tip))
 ##
 #set $subtitles_disabled = not $cur_show_obj.subtitles\
-    or $sickgear.show_queue_scheduler.action.isBeingSubtitled($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInSubtitleQueue($cur_show_obj)
+    or $sickgear.show_queue_scheduler.action.is_being_subtitled($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_subtitle_queue($cur_show_obj)
 #set $tip = (' title="Search subtitle"', (' title="Search subtitle%s"' % $disabled_inprogress_tip,
     $disabled_subtitles_tip)[not $cur_show_obj.subtitles])[$subtitles_disabled]
 #set $curSubtitle = ($tip, $option_state % (('', $disabled)[$subtitles_disabled], 'subtitle', $tip))
 ##
-#set $curDelete_disabled = $sickgear.show_queue_scheduler.action.isBeingRenamed($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInRenameQueue($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInRefreshQueue($cur_show_obj)
+#set $curDelete_disabled = $sickgear.show_queue_scheduler.action.is_being_renamed($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_rename_queue($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_refresh_queue($cur_show_obj)
 #set $tip = ' title="Delete%s"' % ('', $disabled_inprogress_tip)[$curDelete_disabled]
 #set $curDelete = ($tip, $option_state % (('', $disabled)[$curDelete_disabled], 'delete', $tip))
 ##
-#set $curRemove_disabled = $sickgear.show_queue_scheduler.action.isBeingRenamed($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInRenameQueue($cur_show_obj)\
-    or $sickgear.show_queue_scheduler.action.isInRefreshQueue($cur_show_obj)
+#set $curRemove_disabled = $sickgear.show_queue_scheduler.action.is_being_renamed($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_rename_queue($cur_show_obj)\
+    or $sickgear.show_queue_scheduler.action.is_in_refresh_queue($cur_show_obj)
 #set $tip = ' title="Remove%s"' % ('', $disabled_inprogress_tip)[$curRemove_disabled]
 #set $curRemove = ($tip, $option_state % (('', $disabled)[$curRemove_disabled], 'remove', $tip))
 <tr data-tvid_prodid="$cur_show_obj.tvid_prodid" data-size="$show_size">

View file

@@ -62,7 +62,7 @@
 <tbody>
 #set $order = $oldest
 #for $hItem in $failed_results[::-1]
-#set $provider = $providers.getProviderClass($generic.GenericProvider.make_id($hItem['provider']))
+#set $provider = $providers.get_by_id($generic.GenericProvider.make_id($hItem['provider']))
 #set $provider_name = None is not $provider and $provider.name or 'missing provider'
 #set $provider_image = None is not $provider and $provider.image_name() or 'missing.png'
 <tr>

View file

@@ -2,7 +2,6 @@
 #from sickgear.common import Quality, qualityPresets, qualityPresetStrings, SD
 #from sickgear.indexers.indexer_config import TVINFO_TVMAZE, TVINFO_TVDB
 #from lib import exceptions_helper as exceptions
-#from _23 import filter_list
 <% def sg_var(varname, default=False): return getattr(sickgear, varname, default) %>#slurp#
 <% def sg_str(varname, default=''): return getattr(sickgear, varname, default) %>#slurp#
 ##
@@ -19,7 +18,7 @@
 #else:
 #set $initial_quality = $SD
 #end if
-#set $anyQualities, $bestQualities = $Quality.splitQuality($sg_var('QUALITY_DEFAULT', $initial_quality))
+#set $anyQualities, $bestQualities = $Quality.split_quality($sg_var('QUALITY_DEFAULT', $initial_quality))
 <script type="text/javascript" src="$sbRoot/js/qualityChooser.js?v=$sbPID"></script>
 <script type="text/javascript" src="$sbRoot/js/massEdit.js?v=$sbPID"></script>
@@ -69,7 +68,7 @@
 <div id="custom-quality" class="show-if-quality-custom">
 <div class="manageCustom pull-left">
 <h4 style="font-size:14px">Initial</h4>
-#set $anyQualityList = filter_list(lambda x: x > $Quality.NONE, $Quality.qualityStrings)
+#set $anyQualityList = list(filter(lambda x: x > $Quality.NONE, $Quality.qualityStrings))
 <select id="wanted-qualities" name="any_qualities" multiple="multiple" size="$len($anyQualityList)">
 #for $curQuality in sorted($anyQualityList):
 <option value="$curQuality" #if $curQuality in $anyQualities then $selected else ''#>$Quality.get_quality_ui($curQuality)</option>
@@ -78,7 +77,7 @@
 </div>
 <div class="manageCustom pull-left">
 <h4 style="font-size:14px">Upgrade to</h4>
-#set $bestQualityList = filter_list(lambda x: x > $Quality.SDTV, $Quality.qualityStrings)
+#set $bestQualityList = list(filter(lambda x: x > $Quality.SDTV, $Quality.qualityStrings))
 <select id="upgrade-qualities" name="best_qualities" multiple="multiple" size="$len($bestQualityList)">
 #for $curQuality in sorted($bestQualityList):
 <option value="$curQuality" #if $curQuality in $bestQualities then $selected else ''#>$Quality.get_quality_ui($curQuality)</option>

View file

@@ -3,7 +3,6 @@
 #from sickgear.common import *
 #from sickgear.logger import reverseNames
 #from sickgear.helpers import maybe_plural
-#from _23 import list_keys
 ##
 #set global $header = 'Log File'
 #set global $title = 'Logs'
@@ -23,7 +22,7 @@
 <div class="h2footer pull-right">
 <select name="minLevel" id="minLevel" class="form-control form-control-inline input-sm pull-right">
-#set $levels = $list_keys($reverseNames)
+#set $levels = $list($reverseNames)
 #set void = $levels.sort(key=lambda x: $reverseNames[$x])
 #set $level_count = len($levels)
 #for $level in $levels

View file

@@ -15,12 +15,25 @@
 # You should have received a copy of the GNU General Public License
 # along with SickGear. If not, see <http://www.gnu.org/licenses/>.

-import datetime
+from base64 import encodebytes as b64encodebytes
 from collections import deque
-from itertools import islice
+# noinspection PyUnresolvedReferences
+from configparser import ConfigParser
+# noinspection PyUnresolvedReferences
+from enum import Enum
+from itertools import islice, zip_longest
+# noinspection PyUnresolvedReferences
+from inspect import getfullargspec as getargspec
+# noinspection PyUnresolvedReferences
+from os import scandir, DirEntry
+# noinspection PyUnresolvedReferences
+from subprocess import Popen
 from sys import version_info
-from six import binary_type, moves
+import datetime
+# noinspection PyUnresolvedReferences, PyPep8Naming
+import xml.etree.ElementTree as etree
 # noinspection PyUnresolvedReferences
 from six.moves.urllib.parse import quote, quote_plus, unquote as six_unquote, unquote_plus as six_unquote_plus, \
     urlencode, urlsplit, urlunparse, urlunsplit
@@ -40,30 +53,24 @@ if False:
     # noinspection PyTypeChecker
     urlencode = urlsplit = urlunparse = urlunsplit = None  # type: Callable

-PY38 = version_info[0:2] >= (3, 8)
-
-""" one off consumables (Iterators) """
-filter_iter = moves.filter  # type: Callable[[Callable, Iterable], Iterator]
-map_iter = moves.map  # type: Callable[[Callable, ...], Iterator]


 def map_consume(*args):
     # type: (...) -> None
     """Run a lambda over elements without returning anything"""
-    deque(moves.map(*args), maxlen=0)
+    deque(map(*args), maxlen=0)


 def consume(iterator, n=None):
     # type: (Iterator, Optional[int]) -> None
     """Advance the iterator n-steps ahead. If n is None, consume entirely. Returns nothing.

-    Useful if a method returns a Iterator but it's not used, but still all should be called,
+    Useful if a method returns an Iterator that is not used, but still all should be called,
     for example if each iter element calls a function that should be called for all or
     given amount of elements in Iterator

     examples:
-    consume(filter_iter(...))  # consumes all elements of given function that returns a Iterator
-    consume(filter_iter(...), 3)  # consumes next 3 elements of given function that returns a Iterator
+    consume(filter_iter(...))  # consumes all elements of given function that returns an Iterator
+    consume(filter_iter(...), 3)  # consumes next 3 elements of given function that returns an Iterator
     """
     # Use functions that consume iterators at C speed.
     if n is None:
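A hedged usage sketch of the two helpers above, using throwaway data; under py3, `map` is already lazy, which is why `map_consume` reduces to `deque(map(*args), maxlen=0)`:

```python
from collections import deque
from itertools import islice

side_effects = []

# map_consume: run a function over elements purely for its side effects.
deque(map(side_effects.append, [1, 2, 3]), maxlen=0)
assert side_effects == [1, 2, 3]

# consume(iterator, n): advance n steps without keeping results.
it = iter(range(5))
deque(islice(it, 2), maxlen=0)  # roughly consume(it, 2)
assert next(it) == 2
```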
@@ -76,7 +83,7 @@ def consume(iterator, n=None):

 def decode_str(s, encoding='utf-8', errors=None):
     # type: (...) -> AnyStr
-    if isinstance(s, binary_type):
+    if isinstance(s, bytes):
         if None is errors:
             return s.decode(encoding)
         return s.decode(encoding, errors)
@@ -99,7 +106,7 @@ def html_unescape(s):

 def list_range(*args, **kwargs):
     # type: (...) -> List
-    return list(moves.range(*args, **kwargs))
+    return list(range(*args, **kwargs))


 def urlparse(url, scheme='', allow_fragments=True):
@ -135,181 +142,26 @@ def b64encodestring(s, keep_eol=False):
    return data.rstrip()


-if 2 != version_info[0]:
-    # ---------
-    # Python 3+
-    # ---------
-    # noinspection PyUnresolvedReferences,PyProtectedMember
-    from base64 import decodebytes, encodebytes
-    b64decodebytes = decodebytes
-    b64encodebytes = encodebytes
-    # noinspection PyUnresolvedReferences,PyCompatibility
-    from configparser import ConfigParser
-    # noinspection PyUnresolvedReferences
-    from enum import Enum
-    # noinspection PyUnresolvedReferences
-    from os import scandir, DirEntry
-    # noinspection PyUnresolvedReferences
-    from itertools import zip_longest
-    # noinspection PyUnresolvedReferences
-    from inspect import getfullargspec as getargspec
-    # noinspection PyUnresolvedReferences
-    from subprocess import Popen
-    # noinspection PyUnresolvedReferences, PyPep8Naming
-    import xml.etree.ElementTree as etree
-
-    ordered_dict = dict
-    native_timestamp = datetime.datetime.timestamp  # type: Callable[[datetime.datetime], float]
-
-    def unquote(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
-                          encoding, errors)
-
-    def unquote_plus(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote_plus(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
-                          encoding, errors)
-
-    def decode_bytes(d, encoding='utf-8', errors='replace'):
-        if not isinstance(d, binary_type):
-            # noinspection PyArgumentList
-            return bytes(d, encoding=encoding, errors=errors)
-        return d
-
-    def filter_list(*args):
-        # type: (...) -> List
-        return list(filter(*args))
-
-    def list_items(d):
-        # type: (Dict) -> List[Tuple[Any, Any]]
-        """
-        equivalent to python 2 .items()
-        """
-        return list(d.items())
-
-    def list_keys(d):
-        # type: (Dict) -> List
-        """
-        equivalent to python 2 .keys()
-        """
-        return list(d)
-
-    def list_values(d):
-        # type: (Dict) -> List
-        """
-        equivalent to python 2 .values()
-        """
-        return list(d.values())
-
-    def map_list(*args):
-        # type: (...) -> List
-        return list(map(*args))
-
-    def map_none(*args):
-        # type: (...) -> List
-        return list(zip_longest(*args))
-
-    def unidecode(data):
-        # type: (AnyStr) -> AnyStr
-        return data
-else:
-    # ---------
-    # Python 2
-    # ---------
-    import time
-    from lib.unidecode import unidecode as unicode_decode
-    # noinspection PyProtectedMember,PyDeprecation
-    from base64 import decodestring, encodestring
-    # noinspection PyDeprecation
-    b64decodebytes = decodestring
-    # noinspection PyDeprecation
-    b64encodebytes = encodestring
-    # noinspection PyUnresolvedReferences
-    from lib.backports.configparser import ConfigParser
-    # noinspection PyUnresolvedReferences
-    from lib.enum34 import Enum
-    # noinspection PyProtectedMember,PyUnresolvedReferences
-    from lib.scandir.scandir import scandir, GenericDirEntry as DirEntry
-    # noinspection PyUnresolvedReferences,PyDeprecation
-    from inspect import getargspec
-
-    try:
-        # noinspection PyPep8Naming
-        import xml.etree.cElementTree as etree
-    except ImportError:
-        # noinspection PyPep8Naming
-        import xml.etree.ElementTree as etree
-
-    from collections import OrderedDict
-    ordered_dict = OrderedDict
-
-    def _totimestamp(dt=None):
-        # type: (datetime.datetime) -> float
-        """ This function should only be used in this module due to its 1970s+ limitation as that's all we need here and
-        sgdatatime can't be used at this module level
-        """
-        return time.mktime(dt.timetuple())
-
-    native_timestamp = _totimestamp  # type: Callable[[datetime.datetime], float]
-
-    from subprocess import Popen as _Popen
-
-    class Popen(_Popen):
-        def __enter__(self):
-            return self
-
-        def __exit__(self, *args, **kwargs):
-            for x in filter_iter(lambda y: y, [self.stdout, self.stderr, self.stdin]):
-                x.close()
-            self.wait()
-
-    def unquote(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote(decode_str(string, encoding, errors)), encoding, errors)
-
-    def unquote_plus(string, encoding='utf-8', errors='replace'):
-        return decode_str(six_unquote_plus(decode_str(string, encoding, errors)), encoding, errors)
-
-    # noinspection PyUnusedLocal
-    def decode_bytes(d, encoding='utf-8', errors='replace'):
-        if not isinstance(d, binary_type):
-            return bytes(d)
-        return d
-
-    def filter_list(*args):
-        # type: (...) -> List
-        # noinspection PyTypeChecker
-        return filter(*args)
-
-    def list_items(d):
-        # type: (Dict) -> List[Tuple[Any, Any]]
-        # noinspection PyTypeChecker
-        return d.items()
-
-    def list_keys(d):
-        # type: (Dict) -> List
-        # noinspection PyTypeChecker
-        return d.keys()
-
-    def list_values(d):
-        # type: (Dict) -> List
-        # noinspection PyTypeChecker
-        return d.values()
-
-    def map_list(*args):
-        # type: (...) -> List
-        # noinspection PyTypeChecker
-        return map(*args)
-
-    def map_none(*args):
-        # type: (...) -> List
-        # noinspection PyTypeChecker
-        return map(None, *args)
-
-    def unidecode(data):
-        # type: (AnyStr) -> AnyStr
-        # noinspection PyUnresolvedReferences
-        return isinstance(data, unicode) and unicode_decode(data) or data
+native_timestamp = datetime.datetime.timestamp  # type: Callable[[datetime.datetime], float]
+
+
+def unquote(string, encoding='utf-8', errors='replace'):
+    return decode_str(six_unquote(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
+                      encoding, errors)
+
+
+def unquote_plus(string, encoding='utf-8', errors='replace'):
+    return decode_str(six_unquote_plus(decode_str(string, encoding, errors), encoding=encoding, errors=errors),
+                      encoding, errors)
+
+
+def decode_bytes(d, encoding='utf-8', errors='replace'):
+    if not isinstance(d, bytes):
+        # noinspection PyArgumentList
+        return bytes(d, encoding=encoding, errors=errors)
+    return d
+
+
+def map_none(*args):
+    # type: (...) -> List
+    return list(zip_longest(*args))
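The deleted py2 branch above is the template for most of this release's py3-only cleanups: the `_23` shims (`filter_list`, `map_list`, `list_keys`, `list_values`, `list_items`) existed only to paper over py2/py3 differences in `filter`, `map` and dict views. A minimal sketch of the replacement pattern applied throughout the files below (the sample dict is illustrative, not from the codebase):

# py3-only equivalents of the removed _23 helpers
d = {'a': 1, 'b': 2}
assert list(d) == ['a', 'b']                      # was list_keys(d)
assert list(d.values()) == [1, 2]                 # was list_values(d)
assert list(d.items()) == [('a', 1), ('b', 2)]    # was list_items(d)
assert list(map(str, (1, 2))) == ['1', '2']       # was map_list(str, (1, 2))
assert list(filter(None, (0, 1, 2))) == [1, 2]    # was filter_list(None, (0, 1, 2))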

View file

@ -22,7 +22,7 @@ import threading
from datetime import timedelta
from time import sleep, time

-from _23 import ConfigParser
+from configparser import ConfigParser

from .aniDBlink import AniDBLink
from .aniDBcommands import *

View file

@ -21,7 +21,6 @@ from lib.tvinfo_base import CastList, PersonGenders, RoleTypes, \
from json_helper import json_dumps
from sg_helpers import clean_data, get_url, iterate_chunk, try_int

-from _23 import filter_list
from six import iteritems

# noinspection PyUnreachableCode
@ -682,12 +681,12 @@ class TmdbIndexer(TVInfoBase):
                    season_cast_obj['id'] for season_cast_obj in
                    season_data[season_obj[0]].get('cast') or []])
-            for person_obj in sorted(filter_list(lambda a: a['id'] in main_cast_ids,
-                                                 show_data['aggregate_credits']['cast'] or [])[:50],
+            for person_obj in sorted(list(filter(lambda a: a['id'] in main_cast_ids,
+                                                 show_data['aggregate_credits']['cast'] or []))[:50],
                                      key=lambda c: (main_cast_ids.get(c['id'], 0) or 0,
                                                     c['total_episode_count'], c['order'] * -1), reverse=True):
-                for character in sorted(filter_list(lambda b: b['credit_id'] in main_cast_credit_ids,
-                                                    person_obj.get('roles', []) or []),
+                for character in sorted(list(filter(lambda b: b['credit_id'] in main_cast_credit_ids,
+                                                    person_obj.get('roles', []) or [])),
                                         key=lambda c: c['episode_count'], reverse=True):
                    character_obj = TVInfoCharacter(
                        name=clean_data(character['character']),

View file

@ -277,7 +277,7 @@ class TraktAPI(object):
            code = getattr(e.response, 'status_code', None)
            if not code:
                if 'timed out' in ex(e):
-                    log.warning(u'Timeout connecting to Trakt')
+                    log.warning('Timeout connecting to Trakt')
                    if count >= self.max_retrys:
                        raise TraktTimeout()
                    return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
@ -285,12 +285,12 @@ class TraktAPI(object):
                # This is pretty much a fatal error if there is no status_code
                # It means there basically was no response at all
                else:
-                    log.warning(u'Could not connect to Trakt. Error: %s' % ex(e))
+                    log.warning('Could not connect to Trakt. Error: %s' % ex(e))
                    raise TraktException('Could not connect to Trakt. Error: %s' % ex(e))
            elif 502 == code:
                # Retry the request, Cloudflare had a proxying issue
-                log.warning(u'Retrying Trakt api request: %s' % path)
+                log.warning(f'Retrying Trakt api request: {path}')
                if count >= self.max_retrys:
                    raise TraktCloudFlareException()
                return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
@ -303,7 +303,7 @@ class TraktAPI(object):
                    return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
                                              send_oauth=send_oauth, method=method)
-                log.warning(u'Unauthorized. Please check your Trakt settings')
+                log.warning('Unauthorized. Please check your Trakt settings')
                sickgear.TRAKT_ACCOUNTS[send_oauth].auth_failure()
                raise TraktAuthException()
@ -318,18 +318,18 @@ class TraktAPI(object):
                    raise TraktAuthException()
            elif code in (500, 501, 503, 504, 520, 521, 522):
                if count >= self.max_retrys:
-                    log.warning(u'Trakt may have some issues and it\'s unavailable. Code: %s' % code)
+                    log.warning(f'Trakt may have some issues and it\'s unavailable. Code: {code}')
                    raise TraktServerError(error_code=code)
                # http://docs.trakt.apiary.io/#introduction/status-codes
-                log.warning(u'Trakt may have some issues and it\'s unavailable. Trying again')
+                log.warning('Trakt may have some issues and it\'s unavailable. Trying again')
                return self.trakt_request(path, data, headers, url, count=count, sleep_retry=sleep_retry,
                                          send_oauth=send_oauth, method=method)
            elif 404 == code:
-                log.warning(u'Trakt error (404) the resource does not exist: %s%s' % (url, path))
+                log.warning(f'Trakt error (404) the resource does not exist: {url}{path}')
                raise TraktMethodNotExisting('Trakt error (404) the resource does not exist: %s%s' % (url, path))
            elif 429 == code:
                if count >= self.max_retrys:
-                    log.warning(u'Trakt replied with Rate-Limiting, maximum retries exceeded.')
+                    log.warning('Trakt replied with Rate-Limiting, maximum retries exceeded.')
                    raise TraktServerError(error_code=code)
                r_headers = getattr(e.response, 'headers', None)
                if None is not r_headers:
@ -356,14 +356,14 @@ class TraktAPI(object):
                           'revoked, does not match the redirection URI used in the authorization request,'
                           ' or was issued to another client.')
                else:
-                    log.error(u'Could not connect to Trakt. Code error: {0}'.format(code))
+                    log.error('Could not connect to Trakt. Code error: {0}'.format(code))
                    raise TraktException('Could not connect to Trakt. Code error: %s' % code)
        except ConnectionSkipException as e:
            log.warning('Connection is skipped')
            raise e
        except ValueError as e:
-            log.error(u'Value Error: %s' % ex(e))
-            raise TraktValueError(u'Value Error: %s' % ex(e))
+            log.error(f'Value Error: {ex(e)}')
+            raise TraktValueError(f'Value Error: {ex(e)}')
        except (BaseException, Exception) as e:
            log.error('Exception: %s' % ex(e))
            raise TraktException('Could not connect to Trakt. Code error: %s' % ex(e))
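Only the log formatting changes in this hunk, but it documents the retry contract: transient 5xx codes are retried up to `max_retrys` times, and a 429 is retried after honouring the `Retry-After` response header. A simplified, illustrative loop (not the actual `trakt_request` signature):

import time
import requests

def get_with_retries(url, max_retries=3, pause=60):
    """Hypothetical sketch: retry transient 5xx, honour Retry-After on 429."""
    for _ in range(max_retries):
        resp = requests.get(url, timeout=30)
        if resp.status_code in (500, 501, 502, 503, 504, 520, 521, 522):
            time.sleep(pause)            # transient server/proxy issue, retry
            continue
        if 429 == resp.status_code:      # rate limited by the API
            time.sleep(int(resp.headers.get('Retry-After', pause)))
            continue
        return resp
    raise ConnectionError(f'gave up on {url} after {max_retries} attempts')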

View file

@ -39,7 +39,6 @@ from lib.tvinfo_base import CastList, TVInfoCharacter, CrewList, TVInfoPerson, R
from .tvdb_exceptions import TvdbError, TvdbShownotfound, TvdbTokenexpired
from .tvdb_ui import BaseUI, ConsoleUI

-from _23 import filter_list, list_keys, list_values, map_list
from six import integer_types, iteritems, PY2, string_types

# noinspection PyUnreachableCode
@ -139,7 +138,7 @@ class Tvdb(TVInfoBase):
    """Create easy-to-use interface to name of season/episode name
    >> t = Tvdb()
    >> t['Scrubs'][1][24]['episodename']
-    u'My Last Day'
+    'My Last Day'
    """
    map_languages = {}
    reverse_map_languages = {v: k for k, v in iteritems(map_languages)}
@ -202,7 +201,7 @@ class Tvdb(TVInfoBase):
        >> t = Tvdb(actors=True)
        >> t['scrubs']['actors'][0]['name']
-        u'Zach Braff'
+        'Zach Braff'

        custom_ui (tvdb_ui.BaseUI subclass):
            A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
@ -290,7 +289,7 @@ class Tvdb(TVInfoBase):
            'nl': 'nld', 'no': 'nor',
            'pl': 'pol', 'pt': 'pot', 'ru': 'rus', 'sk': 'slv', 'sv': 'swe', 'zh': 'zho', '_1': 'srp',
        }
-        self.config['valid_languages_3'] = list_values(self.config['langabbv_23'])
+        self.config['valid_languages_3'] = list(self.config['langabbv_23'].values())

        # TheTvdb.com should be based around numeric language codes,
        # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
@ -358,7 +357,7 @@ class Tvdb(TVInfoBase):
                else:
                    d_m = shows
                if d_m:
-                    results = map_list(map_data, [d_m['data']])
+                    results = list(map(map_data, [d_m['data']]))
        if ids.get(TVINFO_TVDB_SLUG):
            cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB_SLUG])
            is_none, shows = self._get_cache_entry(cache_id_key)
@ -373,7 +372,7 @@ class Tvdb(TVInfoBase):
                if d_m:
                    for r in d_m:
                        if ids.get(TVINFO_TVDB_SLUG) == r['slug']:
-                            results = map_list(map_data, [r])
+                            results = list(map(map_data, [r]))
                            break
        if name:
            for n in ([name], name)[isinstance(name, list)]:
@ -390,7 +389,7 @@ class Tvdb(TVInfoBase):
                    if r:
                        if not isinstance(r, list):
                            r = [r]
-                        results.extend(map_list(map_data, r))
+                        results.extend(list(map(map_data, r)))

        seen = set()
        results = [seen.add(r['id']) or r for r in results if r['id'] not in seen]
@ -581,7 +580,7 @@ class Tvdb(TVInfoBase):
                        data_list.append(cr)
                resp['data'] = data_list
            return resp
-        return dict([(u'data', (None, resp)[isinstance(resp, string_types)])])
+        return dict([('data', (None, resp)[isinstance(resp, string_types)])])

    def _getetsrc(self, url, params=None, language=None, parse_json=False):
        """Loads a URL using caching
@ -613,8 +612,8 @@ class Tvdb(TVInfoBase):
        # type: (int, Optional[str]) -> Optional[dict]
        results = self.search_tvs(sid, language=language)
        for cur_result in (isinstance(results, dict) and results.get('results') or []):
-            result = filter_list(lambda r: 'series' == r['type'] and sid == r['id'],
-                                 cur_result.get('nbHits') and cur_result.get('hits') or [])
+            result = list(filter(lambda r: 'series' == r['type'] and sid == r['id'],
+                                 cur_result.get('nbHits') and cur_result.get('hits') or []))
            if 1 == len(result):
                result[0]['overview'] = self.clean_overview(
                    result[0]['overviews'][self.config['langabbv_23'].get(language) or 'eng'])
@ -627,7 +626,7 @@ class Tvdb(TVInfoBase):
                # notify of new keys
                if ENV.get('SG_DEV_MODE'):
-                    new_keys = set(list_keys(result[0])).difference({
+                    new_keys = set(list(result[0])).difference({
                        '_highlightResult', 'aliases', 'banner',
                        'fanart', 'firstaired', 'follower_count',
                        'id', 'image', 'is_tvdb_searchable', 'is_tvt_searchable',
@ -788,7 +787,7 @@ class Tvdb(TVInfoBase):
            series_found = self._getetsrc(self.config['url_search_series'], params=self.config['params_search_series'],
                                          language=self.config['language'])
            if series_found:
-                return list_values(series_found)[0]
+                return list(series_found.values())[0]
        except (BaseException, Exception):
            pass
@ -899,15 +898,15 @@ class Tvdb(TVInfoBase):
        try:
            for cur_result in (isinstance(results, dict) and results.get('results') or []):
                # sorts 'banners/images/missing/' to last before filter
-                people = filter_list(
-                    lambda r: 'person' == r['type']
-                    and rc_clean.sub(name, '') == rc_clean.sub(r['name'], ''),
-                    cur_result.get('nbHits')
-                    and sorted(cur_result.get('hits'),
-                               key=lambda x: len(x['image']), reverse=True) or [])
+                people = list(filter(
+                    lambda r: 'person' == r['type']
+                    and rc_clean.sub(name, '') == rc_clean.sub(r['name'], ''),
+                    cur_result.get('nbHits')
+                    and sorted(cur_result.get('hits'),
+                               key=lambda x: len(x['image']), reverse=True) or []))
                if ENV.get('SG_DEV_MODE'):
                    for person in people:
-                        new_keys = set(list_keys(person)).difference({
+                        new_keys = set(list(person)).difference({
                            '_highlightResult', 'banner', 'id', 'image',
                            'is_tvdb_searchable', 'is_tvt_searchable', 'name',
                            'objectID', 'people_birthdate', 'people_died',
@ -1016,14 +1015,14 @@ class Tvdb(TVInfoBase):
                url_image = self._make_image(self.config['url_artworks'], image_data['data'][0]['filename'])
                url_thumb = self._make_image(self.config['url_artworks'], image_data['data'][0]['thumbnail'])
                self._set_show_data(sid, image_type, url_image)
-                self._set_show_data(sid, u'%s_thumb' % image_type, url_thumb)
+                self._set_show_data(sid, f'{image_type}_thumb', url_thumb)
                excluded_main_data = True  # artwork found so prevent fallback
                self._parse_banners(sid, image_data['data'])
                self.shows[sid].__dict__[loaded_name] = True

        # fallback image thumbnail for none excluded_main_data if artwork is not found
        if not excluded_main_data and show_data['data'].get(image_type):
-            self._set_show_data(sid, u'%s_thumb' % image_type,
-                                re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I))
+            self._set_show_data(sid, f'{image_type}_thumb',
+                                re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I))

    def _get_show_data(self,
@ -1068,11 +1067,11 @@ class Tvdb(TVInfoBase):
        else:
            show_data = {'data': {}}

-        for img_type, en_type, p_type in [(u'poster', 'posters_enabled', posters),
-                                          (u'banner', 'banners_enabled', banners),
-                                          (u'fanart', 'fanart_enabled', fanart),
-                                          (u'season', 'seasons_enabled', seasons),
-                                          (u'seasonwide', 'seasonwides_enabled', seasonwides)]:
+        for img_type, en_type, p_type in [('poster', 'posters_enabled', posters),
+                                          ('banner', 'banners_enabled', banners),
+                                          ('fanart', 'fanart_enabled', fanart),
+                                          ('season', 'seasons_enabled', seasons),
+                                          ('seasonwide', 'seasonwides_enabled', seasonwides)]:
            self._parse_images(sid, language, show_data, img_type, en_type, p_type)

        if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False):
@ -1176,9 +1175,9 @@ class Tvdb(TVInfoBase):
                else:
                    page += 1

-        ep_map_keys = {'absolutenumber': u'absolute_number', 'airedepisodenumber': u'episodenumber',
-                       'airedseason': u'seasonnumber', 'airedseasonid': u'seasonid',
-                       'dvdepisodenumber': u'dvd_episodenumber', 'dvdseason': u'dvd_season'}
+        ep_map_keys = {'absolutenumber': 'absolute_number', 'airedepisodenumber': 'episodenumber',
+                       'airedseason': 'seasonnumber', 'airedseasonid': 'seasonid',
+                       'dvdepisodenumber': 'dvd_episodenumber', 'dvdseason': 'dvd_season'}

        for cur_ep in episodes:
            if self.config['dvdorder']:

View file

@ -17,8 +17,8 @@ It must have a method "select_series", this is passed a list of dicts, each dict
contains the the keys "name" (human readable show name), and "sid" (the shows
ID as on thetvdb.com). For example:

-[{'name': u'Lost', 'sid': u'73739'},
- {'name': u'Lost Universe', 'sid': u'73181'}]
+[{'name': 'Lost', 'sid': '73739'},
+ {'name': 'Lost Universe', 'sid': '73181'}]

The "select_series" method must return the appropriate dict, or it can raise
tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show

View file

@ -27,7 +27,6 @@ from lib.tvinfo_base import TVInfoBase, TVInfoImage, TVInfoImageSize, TVInfoImag
    crew_type_names, TVInfoPerson, RoleTypes, TVInfoShow, TVInfoEpisode, TVInfoIDs, TVInfoNetwork, TVInfoSeason, \
    PersonGenders, TVINFO_TVMAZE, TVINFO_TVDB, TVINFO_IMDB

-from _23 import filter_iter
from six import integer_types, iteritems, string_types

# noinspection PyUnreachableCode
@ -683,7 +682,7 @@ class TvMaze(TVInfoBase):
        premieres = []
        returning = []
        rc_lang = re.compile('(?i)eng|jap')
-        for cur_show in filter_iter(lambda s: 1 == s.episode_number and (
+        for cur_show in filter(lambda s: 1 == s.episode_number and (
                None is s.show.language or rc_lang.search(s.show.language)), schedule):
            if 1 == cur_show.season_number:
                premieres += [cur_show]

File diff suppressed because it is too large

View file

@ -1,274 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import os
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
import pathlib
except ImportError:
pathlib = None
from io import open
import sys
try:
from thread import get_ident
except ImportError:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
__all__ = ['UserDict', 'OrderedDict', 'open']
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
native_str = str
str = type('str')
def from_none(exc):
"""raise from_none(ValueError('a')) == raise ValueError('a') from None"""
exc.__cause__ = None
exc.__suppress_context__ = True
return exc
# from reprlib 3.2.1
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
# from collections 3.2.1
class _ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
# can't use 'key in mapping' with defaultdict
return mapping[key]
except KeyError:
pass
# support subclasses that define __missing__
return self.__missing__(key)
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
# reuses stored hash values if possible
return len(set().union(*self.maps))
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps))
)
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
"""
New ChainMap or subclass with a new copy of
maps[0] and refs to maps[1:]
"""
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self): # like Django's Context.push()
'New ChainMap with a new dict followed by all previous maps.'
return self.__class__({}, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
"""
Remove and return an item pair from maps[0].
Raise KeyError is maps[0] is empty.
"""
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
"""
Remove *key* from maps[0] and return its value.
Raise KeyError if *key* not in maps[0].
"""
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
try:
from collections import ChainMap
except ImportError:
ChainMap = _ChainMap
_ABC = getattr(
abc,
'ABC',
# Python 3.3 compatibility
abc.ABCMeta(native_str('__ABC'), (object,), dict(__metaclass__=abc.ABCMeta)),
)
class _PathLike(_ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
return bool(
hasattr(subclass, '__fspath__')
# workaround for Python 3.5
or pathlib
and issubclass(subclass, pathlib.Path)
)
PathLike = getattr(os, 'PathLike', _PathLike)
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
if not hasattr(path, '__fspath__') and isinstance(path, pathlib.Path):
# workaround for Python 3.5
return str(path)
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError(
"expected str, bytes or os.PathLike object, "
"not " + path_type.__name__
)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError(
"expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__, type(path_repr).__name__)
)
fspath = getattr(os, 'fspath', _fspath)
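This vendored compatibility module (recursive_repr, ChainMap, PathLike, fspath) is deleted outright because the py3 standard library ships all of it. For reference, the stdlib equivalents behave like so:

import os
import pathlib
from collections import ChainMap

# ChainMap: lookups search maps left to right, writes go to maps[0]
defaults = {'user': 'guest', 'colour': 'red'}
overrides = {'user': 'admin'}
settings = ChainMap(overrides, defaults)
assert settings['user'] == 'admin' and settings['colour'] == 'red'

# os.fspath implements the same file system path protocol as the backport
assert os.fspath('spam') == 'spam'
assert os.fspath(pathlib.Path('spam')) == 'spam'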

View file

@ -1,196 +0,0 @@
from __future__ import absolute_import
import functools
from collections import namedtuple
from threading import RLock
_CacheInfo = namedtuple("_CacheInfo", ["hits", "misses", "maxsize", "currsize"])
@functools.wraps(functools.update_wrapper)
def update_wrapper(
wrapper,
wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES,
):
"""
Patch two bugs in functools.update_wrapper.
"""
# workaround for http://bugs.python.org/issue3445
assigned = tuple(attr for attr in assigned if hasattr(wrapped, attr))
wrapper = functools.update_wrapper(wrapper, wrapped, assigned, updated)
# workaround for https://bugs.python.org/issue17482
wrapper.__wrapped__ = wrapped
return wrapper
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(
args,
kwds,
typed,
kwd_mark=(object(),),
fasttypes=set([int, str, frozenset, type(None)]),
sorted=sorted,
tuple=tuple,
type=type,
len=len,
):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False): # noqa: C901
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(
key, root
) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it
# to the front of the list
(root,) = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
(root,) = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
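Likewise, this lru_cache backport is removed because functools.lru_cache (stdlib since Python 3.2) provides the same decorator API the backport mirrored, including cache_info() and cache_clear():

from functools import lru_cache

@lru_cache(maxsize=100)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)
print(fib.cache_info())  # CacheInfo(hits=28, misses=31, maxsize=100, currsize=31)
fib.cache_clear()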

View file

@ -1,204 +0,0 @@
"""The match_hostname() function from Python 3.7.0, essential when using SSL."""
import sys
import socket as _socket
try:
# Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not
from _ssl import SSLCertVerificationError
CertificateError = SSLCertVerificationError
except:
class CertificateError(ValueError):
pass
__version__ = '3.7.0.1'
# Divergence: Added to deal with ipaddess as bytes on python2
def _to_text(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
elif sys.version_info >= (3,) and isinstance(obj, bytes):
obj = str(obj, encoding='ascii', errors='strict')
return obj
def _to_bytes(obj):
if isinstance(obj, str) and sys.version_info >= (3,):
obj = bytes(obj, encoding='ascii', errors='strict')
elif sys.version_info < (3,) and isinstance(obj, unicode):
obj = obj.encode('ascii', 'strict')
return obj
def _dnsname_match(dn, hostname):
"""Matching according to RFC 6125, section 6.4.3
- Hostnames are compared lower case.
- For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
- Partial wildcards like 'www*.example.org', multiple wildcards, sole
wildcard or wildcards in labels other then the left-most label are not
supported and a CertificateError is raised.
- A wildcard must match at least one character.
"""
if not dn:
return False
wildcards = dn.count('*')
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
if wildcards > 1:
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"too many wildcards in certificate DNS name: %s" % repr(dn))
dn_leftmost, sep, dn_remainder = dn.partition('.')
if '*' in dn_remainder:
# Only match wildcard in leftmost segment.
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"wildcard can only be present in the leftmost label: "
"%s." % repr(dn))
if not sep:
# no right side
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"sole wildcard without additional labels are not support: "
"%s." % repr(dn))
if dn_leftmost != '*':
# no partial wildcard matching
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"partial wildcards in leftmost label are not supported: "
"%s." % repr(dn))
hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
if not hostname_leftmost or not sep:
# wildcard must match at least one char
return False
return dn_remainder.lower() == hostname_remainder.lower()
def _inet_paton(ipname):
"""Try to convert an IP address to packed binary form
Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
support.
"""
# inet_aton() also accepts strings like '1'
# Divergence: We make sure we have native string type for all python versions
try:
b_ipname = _to_bytes(ipname)
except UnicodeError:
raise ValueError("%s must be an all-ascii string." % repr(ipname))
# Set ipname in native string format
if sys.version_info < (3,):
n_ipname = b_ipname
else:
n_ipname = ipname
if n_ipname.count('.') == 3:
try:
return _socket.inet_aton(n_ipname)
# Divergence: OSError on late python3. socket.error earlier.
# Null bytes generate ValueError on python3(we want to raise
# ValueError anyway), TypeError # earlier
except (OSError, _socket.error, TypeError):
pass
try:
return _socket.inet_pton(_socket.AF_INET6, n_ipname)
# Divergence: OSError on late python3. socket.error earlier.
# Null bytes generate ValueError on python3(we want to raise
# ValueError anyway), TypeError # earlier
except (OSError, _socket.error, TypeError):
# Divergence .format() to percent formatting for Python < 2.6
raise ValueError("%s is neither an IPv4 nor an IP6 "
"address." % repr(ipname))
except AttributeError:
# AF_INET6 not available
pass
# Divergence .format() to percent formatting for Python < 2.6
raise ValueError("%s is not an IPv4 address." % repr(ipname))
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
ip = _inet_paton(ipname.rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed.
The function matches IP addresses rather than dNSNames if hostname is a
valid ipaddress string. IPv4 addresses are supported on all platforms.
IPv6 addresses are supported on platforms with IPv6 support (AF_INET6
and inet_pton).
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence: Deal with hostname as bytes
host_ip = _inet_paton(_to_text(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence: Deal with hostname as byte strings.
# IP addresses should be all ascii, so we consider it not
# an IP address if this fails
host_ip = None
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")

View file

@ -1,216 +0,0 @@
"""
Patch recently added ABCs into the standard lib module
``collections.abc`` (Py3) or ``collections`` (Py2).
Usage::
import backports_abc
backports_abc.patch()
or::
try:
from collections.abc import Generator
except ImportError:
from backports_abc import Generator
"""
try:
import collections.abc as _collections_abc
except ImportError:
import collections as _collections_abc
def get_mro(cls):
try:
return cls.__mro__
except AttributeError:
return old_style_mro(cls)
def old_style_mro(cls):
yield cls
for base in cls.__bases__:
for c in old_style_mro(base):
yield c
def mk_gen():
from abc import abstractmethod
required_methods = (
'__iter__', '__next__' if hasattr(iter(()), '__next__') else 'next',
'send', 'throw', 'close')
class Generator(_collections_abc.Iterator):
__slots__ = ()
if '__next__' in required_methods:
def __next__(self):
return self.send(None)
else:
def next(self):
return self.send(None)
@abstractmethod
def send(self, value):
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError('generator ignored GeneratorExit')
@classmethod
def __subclasshook__(cls, C):
if cls is Generator:
mro = get_mro(C)
for method in required_methods:
for base in mro:
if method in base.__dict__:
break
else:
return NotImplemented
return True
return NotImplemented
generator = type((lambda: (yield))())
Generator.register(generator)
return Generator
def mk_awaitable():
from abc import abstractmethod, ABCMeta
@abstractmethod
def __await__(self):
yield
@classmethod
def __subclasshook__(cls, C):
if cls is Awaitable:
for B in get_mro(C):
if '__await__' in B.__dict__:
if B.__dict__['__await__']:
return True
break
return NotImplemented
# calling metaclass directly as syntax differs in Py2/Py3
Awaitable = ABCMeta('Awaitable', (), {
'__slots__': (),
'__await__': __await__,
'__subclasshook__': __subclasshook__,
})
return Awaitable
def mk_coroutine():
from abc import abstractmethod
class Coroutine(Awaitable):
__slots__ = ()
@abstractmethod
def send(self, value):
"""Send a value into the coroutine.
Return next yielded value or raise StopIteration.
"""
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
"""Raise an exception in the coroutine.
Return next yielded value or raise StopIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
"""Raise GeneratorExit inside coroutine.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError('coroutine ignored GeneratorExit')
@classmethod
def __subclasshook__(cls, C):
if cls is Coroutine:
mro = get_mro(C)
for method in ('__await__', 'send', 'throw', 'close'):
for base in mro:
if method in base.__dict__:
break
else:
return NotImplemented
return True
return NotImplemented
return Coroutine
###
# make all ABCs available in this module
try:
Generator = _collections_abc.Generator
except AttributeError:
Generator = mk_gen()
try:
Awaitable = _collections_abc.Awaitable
except AttributeError:
Awaitable = mk_awaitable()
try:
Coroutine = _collections_abc.Coroutine
except AttributeError:
Coroutine = mk_coroutine()
try:
from inspect import isawaitable
except ImportError:
def isawaitable(obj):
return isinstance(obj, Awaitable)
###
# allow patching the stdlib
PATCHED = {}
def patch(patch_inspect=True):
"""
Main entry point for patching the ``collections.abc`` and ``inspect``
standard library modules.
"""
PATCHED['collections.abc.Generator'] = _collections_abc.Generator = Generator
PATCHED['collections.abc.Coroutine'] = _collections_abc.Coroutine = Coroutine
PATCHED['collections.abc.Awaitable'] = _collections_abc.Awaitable = Awaitable
if patch_inspect:
import inspect
PATCHED['inspect.isawaitable'] = inspect.isawaitable = isawaitable
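The same reasoning applies here: on py3 these ABCs live in collections.abc and inspect.isawaitable is built in, so the patch() machinery has nothing left to do:

from collections.abc import Awaitable, Coroutine, Generator
from inspect import isawaitable

async def answer():
    return 42

coro = answer()
assert isinstance(coro, Awaitable) and isawaitable(coro)
coro.close()  # close the unawaited coroutine to avoid a RuntimeWarning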

View file

@ -16,20 +16,25 @@
# along with SickGear.  If not, see <http://www.gnu.org/licenses/>.

import random

-from six import moves

# Browser apps represented in data
+# noinspection PyUnresolvedReferences
__all__ = ['chrome', 'opera', 'firefox', 'safari', 'ie']

+# noinspection PyUnreachableCode
+if False:
+    from typing import AnyStr


def get_ua():
+    # type: (...) -> AnyStr
    """
    Return a random browser user agent string

-    :return: A browser user agent string
-    :rtype: String
+    :return: A browser user agent
    """
    ua = []
-    for x in moves.xrange(1, 10):
+    for x in range(1, 10):
        ua += [random.choice(browser_ua.get(random.choice(__all__)))]
    return random.choice(ua)

View file

@ -1,5 +1,6 @@
import re

-from bs4 import BeautifulSoup, SoupStrainer
+from bs4 import BeautifulSoup
+from bs4.element import SoupStrainer
from six import iteritems

View file

@ -77,7 +77,7 @@ def generate_key(key_size=4096, output_file='server.key'):
# Ported from cryptography docs/x509/tutorial.rst
def generate_local_cert(private_key, days_valid=3650, output_file='server.crt', loc_name=None, org_name=None):

-    def_name = u'SickGear'
+    def_name = 'SickGear'

    # Various details about who we are. For a self-signed certificate the
    # subject and issuer are always the same.
@ -88,7 +88,7 @@ def generate_local_cert(private_key, days_valid=3650, output_file='server.crt',

    # build Subject Alternate Names (aka SAN) list
    # First the host names, add with x509.DNSName():
-    san_list = [x509.DNSName(u'localhost')]
+    san_list = [x509.DNSName('localhost')]
    try:
        thishostname = text_type(socket.gethostname())
        san_list.append(x509.DNSName(thishostname))
@ -100,13 +100,13 @@ def generate_local_cert(private_key, days_valid=3650, output_file='server.crt',
    try:
        # noinspection PyCompatibility
        from ipaddress import IPv4Address, IPv6Address
-        san_list.append(x509.IPAddress(IPv4Address(u'127.0.0.1')))
-        san_list.append(x509.IPAddress(IPv6Address(u'::1')))
+        san_list.append(x509.IPAddress(IPv4Address('127.0.0.1')))
+        san_list.append(x509.IPAddress(IPv6Address('::1')))
        # append local v4 ip
        mylocalipv4 = localipv4()
        if mylocalipv4:
-            san_list.append(x509.IPAddress(IPv4Address(u'' + mylocalipv4)))
+            san_list.append(x509.IPAddress(IPv4Address('' + mylocalipv4)))
    except (ImportError, Exception):
        pass

View file

@ -9,8 +9,6 @@ from io import BytesIO
from dateutil.tz import tzfile as _tzfile

-# noinspection PyPep8Naming
-import encodingKludge as ek
import sickgear

__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
@ -27,10 +25,10 @@ class tzfile(_tzfile):
def getzoneinfofile_stream():
    try:
        # return BytesIO(get_data(__name__, ZONEFILENAME))
-        zonefile = ek.ek(os.path.join, sickgear.ZONEINFO_DIR, ZONEFILENAME)
-        if not ek.ek(os.path.isfile, zonefile):
+        zonefile = os.path.join(sickgear.ZONEINFO_DIR, ZONEFILENAME)
+        if not os.path.isfile(zonefile):
            warnings.warn('Falling back to included zoneinfo file')
-            zonefile = ek.ek(os.path.join, ek.ek(os.path.dirname, __file__), ZONEFILENAME)
+            zonefile = os.path.join(os.path.dirname(__file__), ZONEFILENAME)
        with open(zonefile, 'rb') as f:
            return BytesIO(f.read())
    except IOError as e:  # TODO  switch to FileNotFoundError?

View file

@ -39,7 +39,6 @@ def set_sys_encoding():
    :return: The encoding that is set
    """

-    sys_encoding = None
    should_exit = False
    try:
        locale.setlocale(locale.LC_ALL, '')
@ -48,7 +47,7 @@ def set_sys_encoding():
    try:
        sys_encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
-        pass
+        sys_encoding = None

    # For OSes that are poorly configured I'll just randomly force UTF-8
    if not sys_encoding or sys_encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):

View file

@ -21,7 +21,7 @@ import string
import re
import struct

from six import string_types, integer_types
-from _23 import decode_str, list_items
+from _23 import decode_str

__all__ = ['resolve']
@ -845,7 +845,7 @@ FOURCC = {
}

# make it fool prove
-for code, value in list_items(FOURCC):
+for code, value in list(FOURCC.items()):
    if not code.upper() in FOURCC:
        FOURCC[code.upper()] = value
    if code.endswith(' '):
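The list(...) wrapper in that loop is not cosmetic: the body inserts new keys into FOURCC while iterating, and on py3 iterating a dict view during mutation raises RuntimeError, so the items are snapshotted first. A small illustration:

fourcc = {'ab ': 1}
# for code, value in fourcc.items():  # py3: RuntimeError, dict changed size
for code, value in list(fourcc.items()):  # snapshot, then mutate freely
    fourcc[code.upper()] = value
    if code.endswith(' '):
        fourcc[code.strip()] = value
assert set(fourcc) == {'ab ', 'AB ', 'ab'}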

View file

@ -14,11 +14,6 @@
# You should have received a copy of the GNU General Public License
# along with SickGear.  If not, see <http://www.gnu.org/licenses/>.

-from six import PY2, string_types
-
-if PY2:
-    from encodingKludge import fixStupidEncodings

# noinspection PyUnreachableCode
if False:
    from typing import AnyStr

@ -28,128 +23,101 @@ def ex(e):
    # type: (BaseException) -> AnyStr
    """Returns a unicode string from the exception text if it exists"""

-    if not PY2:
-        return str(e)
-
-    e_message = u''
-
-    if not e or not e.args:
-        return e_message
-
-    for arg in e.args:
-        if None is not arg:
-            if isinstance(arg, string_types):
-                fixed_arg = fixStupidEncodings(arg, True)
-            else:
-                try:
-                    fixed_arg = u'error ' + fixStupidEncodings(str(arg), True)
-                except (BaseException, Exception):
-                    fixed_arg = None
-            if fixed_arg:
-                if not e_message:
-                    e_message = fixed_arg
-                else:
-                    e_message = e_message + ' : ' + fixed_arg
-
-    return e_message
+    return str(e)


-class SickBeardException(Exception):
+# noinspection DuplicatedCode
+class SickGearException(Exception):
    """Generic SickGear Exception - should never be thrown, only subclassed"""

-class ConfigErrorException(SickBeardException):
+class ConfigErrorException(SickGearException):
    """Error in the config file"""

-class LaterException(SickBeardException):
+class LaterException(SickGearException):
    """Something bad happened that I'll make a real exception for later"""

-class NoNFOException(SickBeardException):
+class NoNFOException(SickGearException):
    """No NFO was found!"""

-class NoShowDirException(SickBeardException):
+class NoShowDirException(SickGearException):
    """Unable to find the show's directory"""

-class FileNotFoundException(SickBeardException):
+class FileNotFoundException(SickGearException):
    """The specified file doesn't exist"""

-class MultipleDBEpisodesException(SickBeardException):
+class MultipleDBEpisodesException(SickGearException):
    """Found multiple episodes in the DB! Must fix DB first"""

-class MultipleDBShowsException(SickBeardException):
+class MultipleDBShowsException(SickGearException):
    """Found multiple shows in the DB! Must fix DB first"""

-class MultipleShowObjectsException(SickBeardException):
+class MultipleShowObjectsException(SickGearException):
    """Found multiple objects for the same show! Something is very wrong"""

-class WrongShowException(SickBeardException):
+class WrongShowException(SickGearException):
    """The episode doesn't belong to the same show as its parent folder"""

-class ShowNotFoundException(SickBeardException):
+class ShowNotFoundException(SickGearException):
    """The show wasn't found on the Indexer"""

-class EpisodeNotFoundException(SickBeardException):
+class EpisodeNotFoundException(SickGearException):
    """The episode wasn't found on the Indexer"""

-class ShowDirNotFoundException(SickBeardException):
+class ShowDirNotFoundException(SickGearException):
    """The show dir doesn't exist"""

-class AuthException(SickBeardException):
+class AuthException(SickGearException):
    """Your authentication information is incorrect"""

-class EpisodeDeletedException(SickBeardException):
+class EpisodeDeletedException(SickGearException):
    """This episode has been deleted"""

-class CantRefreshException(SickBeardException):
+class CantRefreshException(SickGearException):
    """The show can't be refreshed right now"""

-class CantUpdateException(SickBeardException):
+class CantUpdateException(SickGearException):
    """The show can't be updated right now"""

-class CantSwitchException(SickBeardException):
+class CantSwitchException(SickGearException):
    """The show can't be switched right now"""

-class PostProcessingFailed(SickBeardException):
+class PostProcessingFailed(SickGearException):
    """Post-processing the episode failed"""

-class FailedProcessingFailed(SickBeardException):
+class FailedProcessingFailed(SickGearException):
    """Post-processing the failed release failed"""

-class FailedHistoryMultiSnatchException(SickBeardException):
+class FailedHistoryMultiSnatchException(SickGearException):
    """Episode was snatched again before the first one was done"""

-class FailedHistoryNotFoundException(SickBeardException):
+class FailedHistoryNotFoundException(SickGearException):
    """The release was not found in the failed download history tracker"""

-class EpisodeNotFoundByAbsoluteNumberException(SickBeardException):
+class EpisodeNotFoundByAbsoluteNumberException(SickGearException):
    """The show wasn't found in the DB while looking at Absolute Numbers"""

-class ConnectionSkipException(SickBeardException):
+class ConnectionSkipException(SickGearException):
    """Connection was skipped because of previous errors"""

View file

@ -32,4 +32,4 @@ __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
# this has to be at the top level, see how setup.py parses this
#: Distribution version number.
-__version__ = "1.1"
+__version__ = "1.2-dev"

View file

@ -104,18 +104,15 @@ def charStringToList(chars):
    charRanges = [item.strip() for item in chars.split(" | ")]
    rv = []
    for item in charRanges:
-        foundMatch = False
        for regexp in (reChar, reCharRange):
            match = regexp.match(item)
            if match is not None:
                rv.append([hexToInt(item) for item in match.groups()])
                if len(rv[-1]) == 1:
                    rv[-1] = rv[-1] * 2
-                foundMatch = True
                break
-        if not foundMatch:
+        else:
            assert len(item) == 1
            rv.append([ord(item)] * 2)
    rv = normaliseCharList(rv)
    return rv
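The foundMatch flag gives way to Python's for/else: the else suite runs only when the loop finishes without hitting break, which is exactly what the flag tracked. The pattern in isolation:

def describe(numbers):
    for n in numbers:
        if 0 == n % 2:
            print('first even:', n)
            break
    else:
        # runs only when the loop completed without `break`
        print('no even number found')

describe([1, 3, 4])  # first even: 4
describe([1, 3, 5])  # no even number found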

View file

@ -324,7 +324,7 @@ class HTMLUnicodeInputStream(object):
        except KeyError:
            if __debug__:
                for c in characters:
-                    assert(ord(c) < 128)
+                    assert ord(c) < 128
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex


@@ -557,23 +557,36 @@ headingElements = (
 )

 voidElements = frozenset([
-    "base",
-    "command",
-    "event-source",
-    "link",
-    "meta",
-    "hr",
-    "br",
-    "img",
-    "embed",
-    "param",
-    "area",
-    "col",
-    "input",
+    "area",
+    "base",
+    "br",
+    "col",
+    "command",  # removed ^1
+    "embed",
+    "event-source",  # renamed and later removed ^2
+    "hr",
+    "img",
+    "input",
+    "link",
+    "meta",
+    "param",  # deprecated ^3
     "source",
-    "track"
+    "track",
+    "wbr",
 ])
+# Removals and deprecations in the HTML 5 spec:
+# ^1: command
+#    http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2012-December/038472.html
+#    https://github.com/whatwg/html/commit/9e2e25f4ae90969a7c64e0763c98548a35b50af8
+# ^2: event-source
+#    renamed to eventsource in 7/2008:
+#    https://github.com/whatwg/html/commit/d157945d0285b4463a04b57318da0c4b300a99e7
+#    removed entirely in 2/2009:
+#    https://github.com/whatwg/html/commit/43cbdbfbb7eb74b0d65e0f4caab2020c0b2a16ff
+# ^3: param
+#    https://developer.mozilla.org/en-US/docs/Web/HTML/Element/param

 cdataElements = frozenset(['title', 'textarea'])

 rcdataElements = frozenset([
@@ -604,6 +617,7 @@ booleanAttributes = {
     "button": frozenset(["disabled", "autofocus"]),
     "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
     "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
+    "ol": frozenset(["reversed"]),
     "output": frozenset(["disabled", "readonly"]),
     "iframe": frozenset(["seamless"]),
 }


@@ -113,6 +113,7 @@ allowed_elements = frozenset((
     (namespaces['html'], 'strike'),
     (namespaces['html'], 'strong'),
     (namespaces['html'], 'sub'),
+    (namespaces['html'], 'summary'),
     (namespaces['html'], 'sup'),
     (namespaces['html'], 'table'),
     (namespaces['html'], 'tbody'),
@@ -128,6 +129,7 @@ allowed_elements = frozenset((
     (namespaces['html'], 'ul'),
     (namespaces['html'], 'var'),
     (namespaces['html'], 'video'),
+    (namespaces['html'], 'wbr'),
     (namespaces['mathml'], 'maction'),
     (namespaces['mathml'], 'math'),
     (namespaces['mathml'], 'merror'),
@@ -363,6 +365,7 @@ allowed_attributes = frozenset((
     (None, 'maxsize'),
     (None, 'minsize'),
     (None, 'other'),
+    (None, 'reversed'),
     (None, 'rowalign'),
     (None, 'rowalign'),
     (None, 'rowalign'),
@@ -373,6 +376,7 @@ allowed_attributes = frozenset((
     (None, 'scriptlevel'),
     (None, 'selection'),
     (None, 'separator'),
+    (None, 'start'),
     (None, 'stretchy'),
     (None, 'width'),
     (None, 'width'),
@@ -594,6 +598,10 @@ allowed_css_properties = frozenset((
     'height',
     'letter-spacing',
     'line-height',
+    'max-height',
+    'min-height',
+    'max-width',
+    'min-width',
     'overflow',
     'pause',
     'pause-after',
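A quick round-trip to sanity-check the widened allow-lists: after this change, summary and wbr should survive the sanitizer filter. A hedged sketch assuming html5lib 1.2-dev as vendored above (the sanitizer Filter still works there, though it is deprecated upstream):

import html5lib
from html5lib.filters.sanitizer import Filter
from html5lib.serializer import HTMLSerializer

fragment = html5lib.parseFragment('<summary>more</summary>a<wbr>b')
stream = Filter(html5lib.getTreeWalker('etree')(fragment))
print(HTMLSerializer().render(stream))
# expected: <summary>more</summary>a<wbr>b  (both tags now pass through)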


@@ -115,6 +115,9 @@ class HTMLParser(object):

         if tree is None:
             tree = treebuilders.getTreeBuilder("etree")
+        elif isinstance(tree, str):
+            tree = treebuilders.getTreeBuilder(tree)
+
         self.tree = tree(namespaceHTMLElements)
         self.errors = []

@@ -1002,8 +1005,8 @@ def getPhases(debug):
             self.tree.insertText(token["data"])
             # This must be bad for performance
             if (self.parser.framesetOK and
-                any([char not in spaceCharacters
-                     for char in token["data"]])):
+                any(char not in spaceCharacters
+                    for char in token["data"])):
                 self.parser.framesetOK = False

         def processSpaceCharactersNonPre(self, token):
@@ -1850,7 +1853,7 @@ def getPhases(debug):
         def flushCharacters(self):
             data = "".join([item["data"] for item in self.characterTokens])
-            if any([item not in spaceCharacters for item in data]):
+            if any(item not in spaceCharacters for item in data):
                 token = {"type": tokenTypes["Characters"], "data": data}
                 self.parser.phases["inTable"].insertText(token)
             elif data:
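The two any() changes above are the changelog's "improve perf by using generators with any" item: a list comprehension materialises the whole list before any() sees it, while a generator expression lets any() stop at the first hit. A rough, self-contained illustration (the data and the space set are stand-ins; timings are indicative only):

import timeit

data = 'x' * 1_000_000
spaces = frozenset(' \t\n\r\x0c')   # stand-in for spaceCharacters

def list_form():
    return any([c not in spaces for c in data])

def gen_form():
    return any(c not in spaces for c in data)

print(timeit.timeit(list_form, number=10))  # builds a 1M-item list per call
print(timeit.timeit(gen_form, number=10))   # short-circuits on the first char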


@@ -222,14 +222,14 @@ class HTMLSerializer(object):
             self.strict = False

     def encode(self, string):
-        assert(isinstance(string, text_type))
+        assert isinstance(string, text_type)
         if self.encoding:
             return string.encode(self.encoding, "htmlentityreplace")
         else:
             return string

     def encodeStrict(self, string):
-        assert(isinstance(string, text_type))
+        assert isinstance(string, text_type)
         if self.encoding:
             return string.encode(self.encoding, "strict")
         else:


@@ -121,6 +121,7 @@ class Node(object):

 class ActiveFormattingElements(list):
     def append(self, node):
+        """Append node to the end of the list."""
         equalCount = 0
         if node != Marker:
             for element in self[::-1]:


@@ -108,7 +108,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False):
             node.parent = None

     def insertText(self, data, insertBefore=None):
-        if not(len(self._element)):
+        if not len(self._element):
             if not self._element.text:
                 self._element.text = ""
             self._element.text += data
@@ -201,7 +201,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False):
     rv = []

     def serializeElement(element, indent=0):
-        if not(hasattr(element, "tag")):
+        if not hasattr(element, "tag"):
             element = element.getroot()
         if element.tag == "<!DOCTYPE>":
             if element.get("publicId") or element.get("systemId"):


@@ -37,7 +37,7 @@ def getETreeBuilder(ElementTreeImplementation):
         else:
             node = elt

-        if not(hasattr(node, "tag")):
+        if not hasattr(node, "tag"):
             node = node.getroot()
         if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):


@@ -265,9 +265,8 @@ class Auth(object):
             except ValueError as e:
                 if not retry:
                     cache.close()
-                    import encodingKludge as ek
                     import os
-                    ek.ek(os.remove, ek.ek(os.path.join, self._cachedir, diskcache.core.DBNAME))
+                    os.remove(os.path.join(self._cachedir, diskcache.core.DBNAME))
                     return self._get_creds(retry=True)
                 else:
                     raise e
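With py2 gone, the encodingKludge (ek) indirection is dead weight: py3's os.path handles str paths natively. The same cleanup in isolation, with stand-in values for the instance attributes used above:

import os

cachedir = '/tmp/sg-cache'   # stand-in for self._cachedir
dbname = 'cache.db'          # stand-in for diskcache.core.DBNAME

path = os.path.join(cachedir, dbname)
if os.path.exists(path):
    os.remove(path)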

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,608 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname=None, appauthor=None, version=None,
roaming=False, multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))


@@ -5,25 +5,58 @@ import functools
 import contextlib
 import types
 import importlib
+import inspect
+import warnings
+import itertools

-from typing import Union, Optional
+from typing import Union, Optional, cast

 from .abc import ResourceReader, Traversable

 from ._compat import wrap_spec

 Package = Union[types.ModuleType, str]
+Anchor = Package


-def files(package):
-    # type: (Package) -> Traversable
-    """
-    Get a Traversable resource from a package
-    """
-    return from_package(get_package(package))
+def package_to_anchor(func):
+    """
+    Replace 'package' parameter as 'anchor' and warn about the change.
+
+    Other errors should fall through.
+
+    >>> files('a', 'b')
+    Traceback (most recent call last):
+    TypeError: files() takes from 0 to 1 positional arguments but 2 were given
+    """
+    undefined = object()
+
+    @functools.wraps(func)
+    def wrapper(anchor=undefined, package=undefined):
+        if package is not undefined:
+            if anchor is not undefined:
+                return func(anchor, package)
+            warnings.warn(
+                "First parameter to files is renamed to 'anchor'",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            return func(package)
+        elif anchor is undefined:
+            return func()
+        return func(anchor)
+
+    return wrapper
+
+
+@package_to_anchor
+def files(anchor: Optional[Anchor] = None) -> Traversable:
+    """
+    Get a Traversable resource for an anchor.
+    """
+    return from_package(resolve(anchor))


-def get_resource_reader(package):
-    # type: (types.ModuleType) -> Optional[ResourceReader]
+def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
     """
     Return the package's loader if it's a ResourceReader.
     """
@@ -39,24 +72,39 @@ def get_resource_reader(package):
     return reader(spec.name)  # type: ignore


-def resolve(cand):
-    # type: (Package) -> types.ModuleType
-    return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
+@functools.singledispatch
+def resolve(cand: Optional[Anchor]) -> types.ModuleType:
+    return cast(types.ModuleType, cand)


-def get_package(package):
-    # type: (Package) -> types.ModuleType
-    """Take a package name or module object and return the module.
-
-    Raise an exception if the resolved module is not a package.
-    """
-    resolved = resolve(package)
-    if wrap_spec(resolved).submodule_search_locations is None:
-        raise TypeError(f'{package!r} is not a package')
-    return resolved
+@resolve.register
+def _(cand: str) -> types.ModuleType:
+    return importlib.import_module(cand)
+
+
+@resolve.register
+def _(cand: None) -> types.ModuleType:
+    return resolve(_infer_caller().f_globals['__name__'])
+
+
+def _infer_caller():
+    """
+    Walk the stack and find the frame of the first caller not in this module.
+    """
+
+    def is_this_file(frame_info):
+        return frame_info.filename == __file__
+
+    def is_wrapper(frame_info):
+        return frame_info.function == 'wrapper'
+
+    not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
+    # also exclude 'wrapper' due to singledispatch in the call stack
+    callers = itertools.filterfalse(is_wrapper, not_this_file)
+    return next(callers).frame


 def from_package(package: types.ModuleType):
     """
     Return a Traversable object for the given package.
@@ -67,7 +115,14 @@ def from_package(package):

 @contextlib.contextmanager
-def _tempfile(reader, suffix=''):
+def _tempfile(
+    reader,
+    suffix='',
+    # gh-93353: Keep a reference to call os.remove() in late Python
+    # finalization.
+    *,
+    _os_remove=os.remove,
+):
     # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
     # blocks due to the need to close the temporary file to work on Windows
     # properly.
@@ -81,18 +136,35 @@ def _tempfile(reader, suffix=''):
         yield pathlib.Path(raw_path)
     finally:
         try:
-            os.remove(raw_path)
+            _os_remove(raw_path)
         except FileNotFoundError:
             pass


+def _temp_file(path):
+    return _tempfile(path.read_bytes, suffix=path.name)
+
+
+def _is_present_dir(path: Traversable) -> bool:
+    """
+    Some Traversables implement ``is_dir()`` to raise an
+    exception (i.e. ``FileNotFoundError``) when the
+    directory doesn't exist. This function wraps that call
+    to always return a boolean and only return True
+    if there's a dir and it exists.
+    """
+    with contextlib.suppress(FileNotFoundError):
+        return path.is_dir()
+    return False
+
+
 @functools.singledispatch
 def as_file(path):
     """
     Given a Traversable object, return that object as a
     path on the local file system in a context manager.
     """
-    return _tempfile(path.read_bytes, suffix=path.name)
+    return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)


 @as_file.register(pathlib.Path)
@@ -102,3 +174,34 @@ def _(path):
     Degenerate behavior for pathlib.Path objects.
     """
     yield path
+
+
+@contextlib.contextmanager
+def _temp_path(dir: tempfile.TemporaryDirectory):
+    """
+    Wrap tempfile.TemporyDirectory to return a pathlib object.
+    """
+    with dir as result:
+        yield pathlib.Path(result)
+
+
+@contextlib.contextmanager
+def _temp_dir(path):
+    """
+    Given a traversable dir, recursively replicate the whole tree
+    to the file system in a context manager.
+    """
+    assert path.is_dir()
+    with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
+        yield _write_contents(temp_dir, path)
+
+
+def _write_contents(target, source):
+    child = target.joinpath(source.name)
+    if source.is_dir():
+        child.mkdir()
+        for item in source.iterdir():
+            _write_contents(child, item)
+    else:
+        child.write_bytes(source.read_bytes())
+    return child
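Taken together, the new anchor API means files() accepts a module object, a dotted name, or nothing at all (in which case the caller's package is inferred via _infer_caller), and as_file() can now materialise whole directories. A hedged sketch of typical use; 'mypkg' and its resources are hypothetical, and the top-level import path assumes the importlib_resources distribution as vendored above:

from importlib_resources import files, as_file

# a dotted name (or a module object) still works as before
text = files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')

# as_file() can now expose a whole directory tree on the file system
with as_file(files('mypkg') / 'templates') as path:
    print(sorted(p.name for p in path.iterdir()))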


@@ -1,9 +1,12 @@
 # flake8: noqa

 import abc
+import os
 import sys
 import pathlib
 from contextlib import suppress
+from typing import Union
+

 if sys.version_info >= (3, 10):
     from zipfile import Path as ZipPath  # type: ignore
@@ -96,3 +99,10 @@ def wrap_spec(package):
     from . import _adapters

     return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
+
+
+if sys.version_info >= (3, 9):
+    StrPath = Union[str, os.PathLike[str]]
+else:
+    # PathLike is only subscriptable at runtime in 3.9+
+    StrPath = Union[str, "os.PathLike[str]"]


@@ -27,8 +27,7 @@ def deprecated(func):
     return wrapper


-def normalize_path(path):
-    # type: (Any) -> str
+def normalize_path(path: Any) -> str:
     """Normalize a path by ensuring it is a string.

     If the resulting string contains path separators, an exception is raised.


@@ -1,7 +1,13 @@
 import abc
-from typing import BinaryIO, Iterable, Text
+import io
+import itertools
+import pathlib
+from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional

-from ._compat import runtime_checkable, Protocol
+from ._compat import runtime_checkable, Protocol, StrPath
+
+
+__all__ = ["ResourceReader", "Traversable", "TraversableResources"]


 class ResourceReader(metaclass=abc.ABCMeta):
@@ -46,27 +52,34 @@ class ResourceReader(metaclass=abc.ABCMeta):
         raise FileNotFoundError


+class TraversalError(Exception):
+    pass
+
+
 @runtime_checkable
 class Traversable(Protocol):
     """
     An object with a subset of pathlib.Path methods suitable for
     traversing directories and opening files.
+
+    Any exceptions that occur when accessing the backing resource
+    may propagate unaltered.
     """

     @abc.abstractmethod
-    def iterdir(self):
+    def iterdir(self) -> Iterator["Traversable"]:
         """
         Yield Traversable objects in self
         """

-    def read_bytes(self):
+    def read_bytes(self) -> bytes:
         """
         Read contents of self as bytes
         """
         with self.open('rb') as strm:
             return strm.read()

-    def read_text(self, encoding=None):
+    def read_text(self, encoding: Optional[str] = None) -> str:
         """
         Read contents of self as text
         """
@@ -85,13 +98,32 @@ class Traversable(Protocol):
         Return True if self is a file
         """

-    @abc.abstractmethod
-    def joinpath(self, child):
-        """
-        Return Traversable child in self
-        """
-
-    def __truediv__(self, child):
+    def joinpath(self, *descendants: StrPath) -> "Traversable":
+        """
+        Return Traversable resolved with any descendants applied.
+
+        Each descendant should be a path segment relative to self
+        and each may contain multiple levels separated by
+        ``posixpath.sep`` (``/``).
+        """
+        if not descendants:
+            return self
+        names = itertools.chain.from_iterable(
+            path.parts for path in map(pathlib.PurePosixPath, descendants)
+        )
+        target = next(names)
+        matches = (
+            traversable for traversable in self.iterdir() if traversable.name == target
+        )
+        try:
+            match = next(matches)
+        except StopIteration:
+            raise TraversalError(
+                "Target not found during traversal.", target, list(names)
+            )
+        return match.joinpath(*names)
+
+    def __truediv__(self, child: StrPath) -> "Traversable":
         """
         Return Traversable child in self
         """
@@ -107,7 +139,8 @@ class Traversable(Protocol):
         accepted by io.TextIOWrapper.
         """

-    @abc.abstractproperty
+    @property
+    @abc.abstractmethod
     def name(self) -> str:
         """
         The base name of this object without any parent references.
@@ -121,17 +154,17 @@ class TraversableResources(ResourceReader):
     """

     @abc.abstractmethod
-    def files(self):
+    def files(self) -> "Traversable":
         """Return a Traversable object for the loaded package."""

-    def open_resource(self, resource):
+    def open_resource(self, resource: StrPath) -> io.BufferedReader:
         return self.files().joinpath(resource).open('rb')

-    def resource_path(self, resource):
+    def resource_path(self, resource: Any) -> NoReturn:
         raise FileNotFoundError(resource)

-    def is_resource(self, path):
+    def is_resource(self, path: StrPath) -> bool:
         return self.files().joinpath(path).is_file()

-    def contents(self):
+    def contents(self) -> Iterator[str]:
         return (item.name for item in self.files().iterdir())
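The reworked Traversable.joinpath accepts multiple descendants as well as '/'-separated segments, so the following spellings are equivalent (package and paths are hypothetical):

from importlib_resources import files

res = files('mypkg').joinpath('data', 'sub', 'file.txt')
res = files('mypkg').joinpath('data/sub/file.txt')
res = files('mypkg') / 'data' / 'sub' / 'file.txt'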


@@ -82,15 +82,13 @@ class MultiplexedPath(abc.Traversable):
     def is_file(self):
         return False

-    def joinpath(self, child):
-        # first try to find child in current paths
-        for file in self.iterdir():
-            if file.name == child:
-                return file
-        # if it does not exist, construct it with the first path
-        return self._paths[0] / child
-
-    __truediv__ = joinpath
+    def joinpath(self, *descendants):
+        try:
+            return super().joinpath(*descendants)
+        except abc.TraversalError:
+            # One of the paths did not resolve (a directory does not exist).
+            # Just return something that will not exist.
+            return self._paths[0].joinpath(*descendants)

     def open(self, *args, **kwargs):
         raise FileNotFoundError(f'{self} is not a file')


@@ -16,31 +16,28 @@ class SimpleReader(abc.ABC):
     provider.
     """

-    @abc.abstractproperty
-    def package(self):
-        # type: () -> str
+    @property
+    @abc.abstractmethod
+    def package(self) -> str:
         """
         The name of the package for which this reader loads resources.
         """

     @abc.abstractmethod
-    def children(self):
-        # type: () -> List['SimpleReader']
+    def children(self) -> List['SimpleReader']:
         """
         Obtain an iterable of SimpleReader for available
         child containers (e.g. directories).
         """

     @abc.abstractmethod
-    def resources(self):
-        # type: () -> List[str]
+    def resources(self) -> List[str]:
         """
         Obtain available named resources for this virtual package.
         """

     @abc.abstractmethod
-    def open_binary(self, resource):
-        # type: (str) -> BinaryIO
+    def open_binary(self, resource: str) -> BinaryIO:
         """
         Obtain a File-like for a named resource.
         """
@@ -50,13 +47,35 @@ class SimpleReader(abc.ABC):
         return self.package.split('.')[-1]


+class ResourceContainer(Traversable):
+    """
+    Traversable container for a package's resources via its reader.
+    """
+
+    def __init__(self, reader: SimpleReader):
+        self.reader = reader
+
+    def is_dir(self):
+        return True
+
+    def is_file(self):
+        return False
+
+    def iterdir(self):
+        files = (ResourceHandle(self, name) for name in self.reader.resources)
+        dirs = map(ResourceContainer, self.reader.children())
+        return itertools.chain(files, dirs)
+
+    def open(self, *args, **kwargs):
+        raise IsADirectoryError()
+
+
 class ResourceHandle(Traversable):
     """
     Handle to a named resource in a ResourceReader.
     """

-    def __init__(self, parent, name):
-        # type: (ResourceContainer, str) -> None
+    def __init__(self, parent: ResourceContainer, name: str):
         self.parent = parent
         self.name = name  # type: ignore
@@ -76,35 +95,6 @@ class ResourceHandle(Traversable):
         raise RuntimeError("Cannot traverse into a resource")


-class ResourceContainer(Traversable):
-    """
-    Traversable container for a package's resources via its reader.
-    """
-
-    def __init__(self, reader):
-        # type: (SimpleReader) -> None
-        self.reader = reader
-
-    def is_dir(self):
-        return True
-
-    def is_file(self):
-        return False
-
-    def iterdir(self):
-        files = (ResourceHandle(self, name) for name in self.reader.resources)
-        dirs = map(ResourceContainer, self.reader.children())
-        return itertools.chain(files, dirs)
-
-    def open(self, *args, **kwargs):
-        raise IsADirectoryError()
-
-    def joinpath(self, name):
-        return next(
-            traversable for traversable in self.iterdir() if traversable.name == name
-        )
-
-
 class TraversableReader(TraversableResources, SimpleReader):
     """
     A TraversableResources based on SimpleReader. Resource providers


@@ -5,10 +5,18 @@ import functools
 import tempfile
 import shutil
 import operator
+import warnings


 @contextlib.contextmanager
 def pushd(dir):
+    """
+    >>> tmp_path = getfixture('tmp_path')
+    >>> with pushd(tmp_path):
+    ...     assert os.getcwd() == os.fspath(tmp_path)
+    >>> assert os.getcwd() != os.fspath(tmp_path)
+    """
+
     orig = os.getcwd()
     os.chdir(dir)
     try:
@@ -29,6 +37,8 @@ def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
         target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
     if runner is None:
         runner = functools.partial(subprocess.check_call, shell=True)
+    else:
+        warnings.warn("runner parameter is deprecated", DeprecationWarning)
     # In the tar command, use --strip-components=1 to strip the first path and
     # then
     # use -C to cause the files to be extracted to {target_dir}. This ensures
@@ -48,6 +58,15 @@ def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
 def infer_compression(url):
     """
     Given a URL or filename, infer the compression code for tar.
+
+    >>> infer_compression('http://foo/bar.tar.gz')
+    'z'
+    >>> infer_compression('http://foo/bar.tgz')
+    'z'
+    >>> infer_compression('file.bz')
+    'j'
+    >>> infer_compression('file.xz')
+    'J'
     """
     # cheat and just assume it's the last two characters
     compression_indicator = url[-2:]
@@ -61,6 +80,12 @@ def temp_dir(remover=shutil.rmtree):
     """
     Create a temporary directory context. Pass a custom remover
     to override the removal behavior.
+
+    >>> import pathlib
+    >>> with temp_dir() as the_dir:
+    ...     assert os.path.isdir(the_dir)
+    ...     _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents')
+    >>> assert not os.path.exists(the_dir)
     """
     temp_dir = tempfile.mkdtemp()
     try:
@@ -90,6 +115,12 @@ def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):

 @contextlib.contextmanager
 def null():
+    """
+    A null context suitable to stand in for a meaningful context.
+
+    >>> with null() as value:
+    ...     assert value is None
+    """
     yield


@@ -112,6 +143,10 @@ class ExceptionTrap:
     ...     raise ValueError("1 + 1 is not 3")
     >>> bool(trap)
     True
+    >>> trap.value
+    ValueError('1 + 1 is not 3')
+    >>> trap.tb
+    <traceback object at ...>

     >>> with ExceptionTrap(ValueError) as trap:
     ...     raise Exception()
@@ -211,3 +246,43 @@ class suppress(contextlib.suppress, contextlib.ContextDecorator):
     ...     {}['']
     >>> key_error()
     """
+
+
+class on_interrupt(contextlib.ContextDecorator):
+    """
+    Replace a KeyboardInterrupt with SystemExit(1)
+
+    >>> def do_interrupt():
+    ...     raise KeyboardInterrupt()
+    >>> on_interrupt('error')(do_interrupt)()
+    Traceback (most recent call last):
+    ...
+    SystemExit: 1
+    >>> on_interrupt('error', code=255)(do_interrupt)()
+    Traceback (most recent call last):
+    ...
+    SystemExit: 255
+    >>> on_interrupt('suppress')(do_interrupt)()
+    >>> with __import__('pytest').raises(KeyboardInterrupt):
+    ...     on_interrupt('ignore')(do_interrupt)()
+    """
+
+    def __init__(
+        self,
+        action='error',
+        # py3.7 compat
+        # /,
+        code=1,
+    ):
+        self.action = action
+        self.code = code
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exctype, excinst, exctb):
+        if exctype is not KeyboardInterrupt or self.action == 'ignore':
+            return
+        elif self.action == 'error':
+            raise SystemExit(self.code) from excinst
+        return self.action == 'suppress'
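The new on_interrupt decorator is aimed at console-script entry points: it converts Ctrl-C into a clean exit status instead of a traceback. A minimal usage sketch; the jaraco.context import path and the exit code choice are assumptions for illustration:

from jaraco.context import on_interrupt

@on_interrupt('error', code=130)   # 130 is the conventional SIGINT exit status
def main():
    input('press Ctrl-C to interrupt... ')

if __name__ == '__main__':
    main()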


@@ -1,4 +1,6 @@
+"""More routines for operating on iterables, beyond itertools"""
+
 from .more import *  # noqa
 from .recipes import *  # noqa

-__version__ = '8.12.0'
+__version__ = '9.0.0'


@@ -3,7 +3,7 @@ import warnings
 from collections import Counter, defaultdict, deque, abc
 from collections.abc import Sequence
 from functools import partial, reduce, wraps
-from heapq import merge, heapify, heapreplace, heappop
+from heapq import heapify, heapreplace, heappop
 from itertools import (
     chain,
     compress,
@@ -26,12 +26,16 @@ from sys import hexversion, maxsize
 from time import monotonic

 from .recipes import (
+    _marker,
+    _zip_equal,
+    UnequalIterablesError,
     consume,
     flatten,
     pairwise,
     powerset,
     take,
     unique_everseen,
+    all_equal,
 )

 __all__ = [
@@ -48,9 +52,9 @@ __all__ = [
     'chunked_even',
     'circular_shifts',
     'collapse',
-    'collate',
     'combination_index',
     'consecutive_groups',
+    'constrained_batches',
     'consumer',
     'count_cycle',
     'countable',
@@ -66,6 +70,7 @@ __all__ = [
     'first',
     'groupby_transform',
     'ichunked',
+    'iequals',
     'ilen',
     'interleave',
     'interleave_evenly',
@@ -76,6 +81,7 @@ __all__ = [
     'iterate',
     'last',
     'locate',
+    'longest_common_prefix',
     'lstrip',
     'make_decorator',
     'map_except',
@@ -132,9 +138,6 @@ __all__ = [
 ]


-_marker = object()
-
-
 def chunked(iterable, n, strict=False):
     """Break *iterable* into lists of length *n*:
@@ -409,44 +412,6 @@ class peekable:
         return self._cache[index]


-def collate(*iterables, **kwargs):
-    """Return a sorted merge of the items from each of several already-sorted
-    *iterables*.
-
-        >>> list(collate('ACDZ', 'AZ', 'JKL'))
-        ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
-
-    Works lazily, keeping only the next value from each iterable in memory. Use
-    :func:`collate` to, for example, perform a n-way mergesort of items that
-    don't fit in memory.
-
-    If a *key* function is specified, the iterables will be sorted according
-    to its result:
-
-        >>> key = lambda s: int(s)  # Sort by numeric value, not by string
-        >>> list(collate(['1', '10'], ['2', '11'], key=key))
-        ['1', '2', '10', '11']
-
-    If the *iterables* are sorted in descending order, set *reverse* to
-    ``True``:
-
-        >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
-        [5, 4, 3, 2, 1, 0]
-
-    If the elements of the passed-in iterables are out of order, you might get
-    unexpected results.
-
-    On Python 3.5+, this function is an alias for :func:`heapq.merge`.
-
-    """
-    warnings.warn(
-        "collate is no longer part of more_itertools, use heapq.merge",
-        DeprecationWarning,
-    )
-    return merge(*iterables, **kwargs)
-
-
 def consumer(func):
     """Decorator that automatically advances a PEP-342-style "reverse iterator"
     to its first yield point so you don't have to call ``next()`` on it
@@ -872,7 +837,9 @@ def windowed(seq, n, fillvalue=None, step=1):
         yield tuple(window)

     size = len(window)
-    if size < n:
+    if size == 0:
+        return
+    elif size < n:
         yield tuple(chain(window, repeat(fillvalue, n - size)))
     elif 0 < i < min(step, n):
         window += (fillvalue,) * i
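The new size == 0 guard changes exactly one edge case: windowed() on an empty iterable now yields nothing instead of a single all-fillvalue tuple. Observable behaviour, assuming more-itertools 9.0.0 as vendored here:

from more_itertools import windowed

print(list(windowed([], 3)))      # [] after this change (was [(None, None, None)])
print(list(windowed([1, 2], 3)))  # [(1, 2, None)] (unchanged)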
@@ -1645,45 +1612,6 @@ def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
     )


-class UnequalIterablesError(ValueError):
-    def __init__(self, details=None):
-        msg = 'Iterables have different lengths'
-        if details is not None:
-            msg += (': index 0 has length {}; index {} has length {}').format(
-                *details
-            )
-
-        super().__init__(msg)
-
-
-def _zip_equal_generator(iterables):
-    for combo in zip_longest(*iterables, fillvalue=_marker):
-        for val in combo:
-            if val is _marker:
-                raise UnequalIterablesError()
-        yield combo
-
-
-def _zip_equal(*iterables):
-    # Check whether the iterables are all the same size.
-    try:
-        first_size = len(iterables[0])
-        for i, it in enumerate(iterables[1:], 1):
-            size = len(it)
-            if size != first_size:
-                break
-        else:
-            # If we didn't break out, we can use the built-in zip.
-            return zip(*iterables)
-
-        # If we did break out, there was a mismatch.
-        raise UnequalIterablesError(details=(first_size, i, size))
-    # If any one of the iterables didn't have a length, start reading
-    # them until one runs out.
-    except TypeError:
-        return _zip_equal_generator(iterables)
-
-
 def zip_equal(*iterables):
     """``zip`` the input *iterables* together, but raise
     ``UnequalIterablesError`` if they aren't all the same length.
@@ -1825,7 +1753,7 @@ def unzip(iterable):
     of the zipped *iterable*.

     The ``i``-th iterable contains the ``i``-th element from each element
-    of the zipped iterable. The first element is used to to determine the
+    of the zipped iterable. The first element is used to determine the
     length of the remaining elements.

     >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
@@ -2375,6 +2303,16 @@ def locate(iterable, pred=bool, window_size=None):
     return compress(count(), starmap(pred, it))


+def longest_common_prefix(iterables):
+    """Yield elements of the longest common prefix amongst given *iterables*.
+
+    >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
+    'ab'
+
+    """
+    return (c[0] for c in takewhile(all_equal, zip(*iterables)))
+
+
 def lstrip(iterable, pred):
     """Yield the items from *iterable*, but strip any from the beginning
     for which *pred* returns ``True``.
@@ -2683,7 +2621,7 @@ def difference(iterable, func=sub, *, initial=None):
     if initial is not None:
         first = []

-    return chain(first, starmap(func, zip(b, a)))
+    return chain(first, map(func, b, a))


 class SequenceView(Sequence):
@@ -3326,6 +3264,27 @@ def only(iterable, default=None, too_long=None):
     return first_value


+class _IChunk:
+    def __init__(self, iterable, n):
+        self._it = islice(iterable, n)
+        self._cache = deque()
+
+    def fill_cache(self):
+        self._cache.extend(self._it)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            return next(self._it)
+        except StopIteration:
+            if self._cache:
+                return self._cache.popleft()
+            else:
+                raise
+
+
 def ichunked(iterable, n):
     """Break *iterable* into sub-iterables with *n* elements each.

     :func:`ichunked` is like :func:`chunked`, but it yields iterables
@@ -3347,20 +3306,39 @@ def ichunked(iterable, n):
         [8, 9, 10, 11]

     """
-    source = iter(iterable)
+    source = peekable(iter(iterable))
+    ichunk_marker = object()
     while True:
         # Check to see whether we're at the end of the source iterable
-        item = next(source, _marker)
-        if item is _marker:
+        item = source.peek(ichunk_marker)
+        if item is ichunk_marker:
             return

-        # Clone the source and yield an n-length slice
-        source, it = tee(chain([item], source))
-        yield islice(it, n)
+        chunk = _IChunk(source, n)
+        yield chunk

-        # Advance the source iterable
-        consume(source, n)
+        # Advance the source iterable and fill previous chunk's cache
+        chunk.fill_cache()


+def iequals(*iterables):
+    """Return ``True`` if all given *iterables* are equal to each other,
+    which means that they contain the same elements in the same order.
+
+    The function is useful for comparing iterables of different data types
+    or iterables that do not support equality checks.
+
+    >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
+    True
+
+    >>> iequals("abc", "acb")
+    False
+
+    Not to be confused with :func:`all_equals`, which checks whether all
+    elements of iterable are equal to each other.
+
+    """
+    return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
+
+
 def distinct_combinations(iterable, r):
@@ -3655,7 +3633,9 @@ class callback_iter:
         self._aborted = False
         self._future = None
         self._wait_seconds = wait_seconds
-        self._executor = __import__("concurrent.futures").futures.ThreadPoolExecutor(max_workers=1)
+        # Lazily import concurrent.future
+        self._executor = __import__(
+            'concurrent.futures'
+        ).futures.ThreadPoolExecutor(max_workers=1)
         self._iterator = self._reader()

     def __enter__(self):
@ -3960,7 +3940,7 @@ def combination_index(element, iterable):
n, _ = last(pool, default=(n, None)) n, _ = last(pool, default=(n, None))
# Python versiosn below 3.8 don't have math.comb # Python versions below 3.8 don't have math.comb
index = 1 index = 1
for i, j in enumerate(reversed(indexes), start=1): for i, j in enumerate(reversed(indexes), start=1):
j = n - j j = n - j
@ -4113,7 +4093,7 @@ def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False):
If the *strict* keyword argument is ``True``, then If the *strict* keyword argument is ``True``, then
``UnequalIterablesError`` will be raised if any of the iterables have ``UnequalIterablesError`` will be raised if any of the iterables have
different lengthss. different lengths.
""" """
def is_scalar(obj): def is_scalar(obj):
@ -4314,3 +4294,53 @@ def minmax(iterable_or_value, *others, key=None, default=_marker):
hi, hi_key = y, y_key hi, hi_key = y, y_key
return lo, hi return lo, hi
def constrained_batches(
iterable, max_size, max_count=None, get_len=len, strict=True
):
"""Yield batches of items from *iterable* with a combined size limited by
*max_size*.
>>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
>>> list(constrained_batches(iterable, 10))
[(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
If a *max_count* is supplied, the number of items per batch is also
limited:
>>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
>>> list(constrained_batches(iterable, 10, max_count = 2))
[(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
If a *get_len* function is supplied, use that instead of :func:`len` to
determine item size.
If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
than *max_size*. Otherwise, allow single items to exceed *max_size*.
"""
if max_size <= 0:
raise ValueError('maximum size must be greater than zero')
batch = []
batch_size = 0
batch_count = 0
for item in iterable:
item_len = get_len(item)
if strict and item_len > max_size:
raise ValueError('item size exceeds maximum size')
reached_count = batch_count == max_count
reached_size = item_len + batch_size > max_size
if batch_count and (reached_size or reached_count):
yield tuple(batch)
batch.clear()
batch_size = 0
batch_count = 0
batch.append(item)
batch_size += item_len
batch_count += 1
if batch:
yield tuple(batch)
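A hedged sketch of the *get_len* hook described above: batching hypothetical (name, size) records by their size field instead of by :func:`len`. The records and the expected output are illustrative only.

from operator import itemgetter
from more_itertools import constrained_batches

records = [('a.log', 400), ('b.log', 700), ('c.log', 100), ('d.log', 900)]
batches = list(constrained_batches(records, 1000, get_len=itemgetter(1)))
# [(('a.log', 400),), (('b.log', 700), ('c.log', 100)), (('d.log', 900),)]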
@ -72,7 +72,6 @@ class peekable(Generic[_T], Iterator[_T]):
@overload @overload
def __getitem__(self, index: slice) -> List[_T]: ... def __getitem__(self, index: slice) -> List[_T]: ...
def collate(*iterables: Iterable[_T], **kwargs: Any) -> Iterable[_T]: ...
def consumer(func: _GenFn) -> _GenFn: ... def consumer(func: _GenFn) -> _GenFn: ...
def ilen(iterable: Iterable[object]) -> int: ... def ilen(iterable: Iterable[object]) -> int: ...
def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ...
@ -179,7 +178,7 @@ def padded(
iterable: Iterable[_T], iterable: Iterable[_T],
*, *,
n: Optional[int] = ..., n: Optional[int] = ...,
next_multiple: bool = ... next_multiple: bool = ...,
) -> Iterator[Optional[_T]]: ... ) -> Iterator[Optional[_T]]: ...
@overload @overload
def padded( def padded(
@ -225,7 +224,7 @@ def zip_equal(
__iter1: Iterable[_T], __iter1: Iterable[_T],
__iter2: Iterable[_T], __iter2: Iterable[_T],
__iter3: Iterable[_T], __iter3: Iterable[_T],
*iterables: Iterable[_T] *iterables: Iterable[_T],
) -> Iterator[Tuple[_T, ...]]: ... ) -> Iterator[Tuple[_T, ...]]: ...
@overload @overload
def zip_offset( def zip_offset(
@ -233,7 +232,7 @@ def zip_offset(
*, *,
offsets: _SizedIterable[int], offsets: _SizedIterable[int],
longest: bool = ..., longest: bool = ...,
fillvalue: None = None fillvalue: None = None,
) -> Iterator[Tuple[Optional[_T1]]]: ... ) -> Iterator[Tuple[Optional[_T1]]]: ...
@overload @overload
def zip_offset( def zip_offset(
@ -242,7 +241,7 @@ def zip_offset(
*, *,
offsets: _SizedIterable[int], offsets: _SizedIterable[int],
longest: bool = ..., longest: bool = ...,
fillvalue: None = None fillvalue: None = None,
) -> Iterator[Tuple[Optional[_T1], Optional[_T2]]]: ... ) -> Iterator[Tuple[Optional[_T1], Optional[_T2]]]: ...
@overload @overload
def zip_offset( def zip_offset(
@ -252,7 +251,7 @@ def zip_offset(
*iterables: Iterable[_T], *iterables: Iterable[_T],
offsets: _SizedIterable[int], offsets: _SizedIterable[int],
longest: bool = ..., longest: bool = ...,
fillvalue: None = None fillvalue: None = None,
) -> Iterator[Tuple[Optional[_T], ...]]: ... ) -> Iterator[Tuple[Optional[_T], ...]]: ...
@overload @overload
def zip_offset( def zip_offset(
@ -420,7 +419,7 @@ def difference(
iterable: Iterable[_T], iterable: Iterable[_T],
func: Callable[[_T, _T], _U] = ..., func: Callable[[_T, _T], _U] = ...,
*, *,
initial: None = ... initial: None = ...,
) -> Iterator[Union[_T, _U]]: ... ) -> Iterator[Union[_T, _U]]: ...
@overload @overload
def difference( def difference(
@ -529,12 +528,12 @@ def distinct_combinations(
def filter_except( def filter_except(
validator: Callable[[Any], object], validator: Callable[[Any], object],
iterable: Iterable[_T], iterable: Iterable[_T],
*exceptions: Type[BaseException] *exceptions: Type[BaseException],
) -> Iterator[_T]: ... ) -> Iterator[_T]: ...
def map_except( def map_except(
function: Callable[[Any], _U], function: Callable[[Any], _U],
iterable: Iterable[_T], iterable: Iterable[_T],
*exceptions: Type[BaseException] *exceptions: Type[BaseException],
) -> Iterator[_U]: ... ) -> Iterator[_U]: ...
def map_if( def map_if(
iterable: Iterable[Any], iterable: Iterable[Any],
@ -610,7 +609,7 @@ def zip_broadcast(
scalar_types: Union[ scalar_types: Union[
type, Tuple[Union[type, Tuple[Any, ...]], ...], None type, Tuple[Union[type, Tuple[Any, ...]], ...], None
] = ..., ] = ...,
strict: bool = ... strict: bool = ...,
) -> Iterable[Tuple[_T, ...]]: ... ) -> Iterable[Tuple[_T, ...]]: ...
def unique_in_window( def unique_in_window(
iterable: Iterable[_T], n: int, key: Optional[Callable[[_T], _U]] = ... iterable: Iterable[_T], n: int, key: Optional[Callable[[_T], _U]] = ...
@ -640,7 +639,7 @@ def minmax(
iterable_or_value: Iterable[_SupportsLessThanT], iterable_or_value: Iterable[_SupportsLessThanT],
*, *,
key: None = None, key: None = None,
default: _U default: _U,
) -> Union[_U, Tuple[_SupportsLessThanT, _SupportsLessThanT]]: ... ) -> Union[_U, Tuple[_SupportsLessThanT, _SupportsLessThanT]]: ...
@overload @overload
def minmax( def minmax(
@ -653,12 +652,23 @@ def minmax(
def minmax( def minmax(
iterable_or_value: _SupportsLessThanT, iterable_or_value: _SupportsLessThanT,
__other: _SupportsLessThanT, __other: _SupportsLessThanT,
*others: _SupportsLessThanT *others: _SupportsLessThanT,
) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ... ) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
@overload @overload
def minmax( def minmax(
iterable_or_value: _T, iterable_or_value: _T,
__other: _T, __other: _T,
*others: _T, *others: _T,
key: Callable[[_T], _SupportsLessThan] key: Callable[[_T], _SupportsLessThan],
) -> Tuple[_T, _T]: ... ) -> Tuple[_T, _T]: ...
def longest_common_prefix(
iterables: Iterable[Iterable[_T]],
) -> Iterator[_T]: ...
def iequals(*iterables: Iterable[object]) -> bool: ...
def constrained_batches(
iterable: Iterable[object],
max_size: int,
max_count: Optional[int] = ...,
get_len: Callable[[_T], object] = ...,
strict: bool = ...,
) -> Iterator[Tuple[_T]]: ...
@ -7,11 +7,16 @@ Some backward-compatible usability improvements have been made.
.. [1] http://docs.python.org/library/itertools.html#recipes .. [1] http://docs.python.org/library/itertools.html#recipes
""" """
import warnings import math
import operator
from collections import deque from collections import deque
from collections.abc import Sized
from functools import reduce
from itertools import ( from itertools import (
chain, chain,
combinations, combinations,
compress,
count, count,
cycle, cycle,
groupby, groupby,
@ -21,11 +26,11 @@ from itertools import (
tee, tee,
zip_longest, zip_longest,
) )
import operator
from random import randrange, sample, choice from random import randrange, sample, choice
__all__ = [ __all__ = [
'all_equal', 'all_equal',
'batched',
'before_and_after', 'before_and_after',
'consume', 'consume',
'convolve', 'convolve',
@ -41,6 +46,7 @@ __all__ = [
'pad_none', 'pad_none',
'pairwise', 'pairwise',
'partition', 'partition',
'polynomial_from_roots',
'powerset', 'powerset',
'prepend', 'prepend',
'quantify', 'quantify',
@ -50,7 +56,9 @@ __all__ = [
'random_product', 'random_product',
'repeatfunc', 'repeatfunc',
'roundrobin', 'roundrobin',
'sieve',
'sliding_window', 'sliding_window',
'subslices',
'tabulate', 'tabulate',
'tail', 'tail',
'take', 'take',
@ -59,6 +67,8 @@ __all__ = [
'unique_justseen', 'unique_justseen',
] ]
_marker = object()
def take(n, iterable): def take(n, iterable):
"""Return first *n* items of the iterable as a list. """Return first *n* items of the iterable as a list.
@ -102,7 +112,14 @@ def tail(n, iterable):
['E', 'F', 'G'] ['E', 'F', 'G']
""" """
return iter(deque(iterable, maxlen=n)) # If the given iterable has a length, then we can use islice to get its
# final elements. Note that if the iterable is not actually Iterable,
# either islice or deque will throw a TypeError. This is why we don't
# check if it is Iterable.
if isinstance(iterable, Sized):
yield from islice(iterable, max(0, len(iterable) - n), None)
else:
yield from iter(deque(iterable, maxlen=n))
def consume(iterator, n=None): def consume(iterator, n=None):
@ -284,20 +301,83 @@ else:
pairwise.__doc__ = _pairwise.__doc__ pairwise.__doc__ = _pairwise.__doc__
def grouper(iterable, n, fillvalue=None): class UnequalIterablesError(ValueError):
"""Collect data into fixed-length chunks or blocks. def __init__(self, details=None):
msg = 'Iterables have different lengths'
if details is not None:
msg += (': index 0 has length {}; index {} has length {}').format(
*details
)
>>> list(grouper('ABCDEFG', 3, 'x')) super().__init__(msg)
def _zip_equal_generator(iterables):
for combo in zip_longest(*iterables, fillvalue=_marker):
for val in combo:
if val is _marker:
raise UnequalIterablesError()
yield combo
def _zip_equal(*iterables):
# Check whether the iterables are all the same size.
try:
first_size = len(iterables[0])
for i, it in enumerate(iterables[1:], 1):
size = len(it)
if size != first_size:
break
else:
# If we didn't break out, we can use the built-in zip.
return zip(*iterables)
# If we did break out, there was a mismatch.
raise UnequalIterablesError(details=(first_size, i, size))
# If any one of the iterables didn't have a length, start reading
# them until one runs out.
except TypeError:
return _zip_equal_generator(iterables)
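A short sketch of the two code paths above, using the private helpers as defined in this file: when every argument is sized, the mismatch is detected eagerly, before any iteration; unsized inputs fall back to the sentinel-based generator.

assert list(_zip_equal([1, 2], 'ab')) == [(1, 'a'), (2, 'b')]
try:
    _zip_equal([1, 2, 3], 'ab')  # both sized: length check happens up front
except UnequalIterablesError as exc:
    print(exc)  # Iterables have different lengths: index 0 has length 3; index 1 has length 2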
def grouper(iterable, n, incomplete='fill', fillvalue=None):
"""Group elements from *iterable* into fixed-length groups of length *n*.
>>> list(grouper('ABCDEF', 3))
[('A', 'B', 'C'), ('D', 'E', 'F')]
The keyword arguments *incomplete* and *fillvalue* control what happens for
iterables whose length is not a multiple of *n*.
When *incomplete* is `'fill'`, the last group will contain instances of
*fillvalue*.
>>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
When *incomplete* is `'ignore'`, the last group will not be emitted.
>>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F')]
When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
>>> it = grouper('ABCDEFG', 3, incomplete='strict')
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnequalIterablesError
""" """
if isinstance(iterable, int):
warnings.warn(
"grouper expects iterable as first parameter", DeprecationWarning
)
n, iterable = iterable, n
args = [iter(iterable)] * n args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args) if incomplete == 'fill':
return zip_longest(*args, fillvalue=fillvalue)
if incomplete == 'strict':
return _zip_equal(*args)
if incomplete == 'ignore':
return zip(*args)
else:
raise ValueError('Expected fill, strict, or ignore')
def roundrobin(*iterables): def roundrobin(*iterables):
@ -658,11 +738,12 @@ def before_and_after(predicate, it):
transition.append(elem) transition.append(elem)
return return
def remainder_iterator(): # Note: this is different from itertools recipes to allow nesting
yield from transition # before_and_after remainders into before_and_after again. See tests
yield from it # for an example.
remainder_iterator = chain(transition, it)
return true_iterator(), remainder_iterator() return true_iterator(), remainder_iterator
def triplewise(iterable): def triplewise(iterable):
@ -696,3 +777,65 @@ def sliding_window(iterable, n):
for x in it: for x in it:
window.append(x) window.append(x)
yield tuple(window) yield tuple(window)
def subslices(iterable):
"""Return all contiguous non-empty subslices of *iterable*.
>>> list(subslices('ABC'))
[['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]
This is similar to :func:`substrings`, but emits items in a different
order.
"""
seq = list(iterable)
slices = starmap(slice, combinations(range(len(seq) + 1), 2))
return map(operator.getitem, repeat(seq), slices)
def polynomial_from_roots(roots):
"""Compute a polynomial's coefficients from its roots.
>>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
>>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
[1, -4, -17, 60]
"""
# Use math.prod for Python 3.8+,
prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1))
roots = list(map(operator.neg, roots))
return [
sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1)
]
def sieve(n):
"""Yield the primes less than n.
>>> list(sieve(30))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
"""
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
limit = isqrt(n) + 1
data = bytearray([1]) * n
data[:2] = 0, 0
for p in compress(range(limit), data):
data[p + p : n : p] = bytearray(len(range(p + p, n, p)))
return compress(count(), data)
def batched(iterable, n):
"""Batch data into lists of length *n*. The last batch may be shorter.
>>> list(batched('ABCDEFG', 3))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
This recipe is from the ``itertools`` docs. This library also provides
:func:`chunked`, which has a different implementation.
"""
it = iter(iterable)
while True:
batch = list(islice(it, n))
if not batch:
break
yield batch
@ -6,6 +6,7 @@ from typing import (
Iterator, Iterator,
List, List,
Optional, Optional,
Sequence,
Tuple, Tuple,
TypeVar, TypeVar,
Union, Union,
@ -39,21 +40,11 @@ def repeatfunc(
func: Callable[..., _U], times: Optional[int] = ..., *args: Any func: Callable[..., _U], times: Optional[int] = ..., *args: Any
) -> Iterator[_U]: ... ) -> Iterator[_U]: ...
def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: ... def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: ...
@overload
def grouper( def grouper(
iterable: Iterable[_T], n: int iterable: Iterable[_T],
) -> Iterator[Tuple[Optional[_T], ...]]: ... n: int,
@overload incomplete: str = ...,
def grouper( fillvalue: _U = ...,
iterable: Iterable[_T], n: int, fillvalue: _U
) -> Iterator[Tuple[Union[_T, _U], ...]]: ...
@overload
def grouper( # Deprecated interface
iterable: int, n: Iterable[_T]
) -> Iterator[Tuple[Optional[_T], ...]]: ...
@overload
def grouper( # Deprecated interface
iterable: int, n: Iterable[_T], fillvalue: _U
) -> Iterator[Tuple[Union[_T, _U], ...]]: ... ) -> Iterator[Tuple[Union[_T, _U], ...]]: ...
def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ... def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
def partition( def partition(
@ -110,3 +101,10 @@ def triplewise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T, _T]]: ...
def sliding_window( def sliding_window(
iterable: Iterable[_T], n: int iterable: Iterable[_T], n: int
) -> Iterator[Tuple[_T, ...]]: ... ) -> Iterator[Tuple[_T, ...]]: ...
def subslices(iterable: Iterable[_T]) -> Iterator[List[_T]]: ...
def polynomial_from_roots(roots: Sequence[int]) -> List[int]: ...
def sieve(n: int) -> Iterator[int]: ...
def batched(
iterable: Iterable[_T],
n: int,
) -> Iterator[List[_T]]: ...
@ -1,26 +0,0 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "21.3"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014-2019 %s" % __author__
@ -2,24 +2,14 @@
# 2.0, and the BSD License. See the LICENSE file in the root of this repository # 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details. # for complete details.
from .__about__ import ( __title__ = "packaging"
__author__, __summary__ = "Core utilities for Python packages"
__copyright__, __uri__ = "https://github.com/pypa/packaging"
__email__,
__license__,
__summary__,
__title__,
__uri__,
__version__,
)
__all__ = [ __version__ = "23.0"
"__title__",
"__summary__", __author__ = "Donald Stufft and individual contributors"
"__uri__", __email__ = "donald@stufft.io"
"__version__",
"__author__", __license__ = "BSD-2-Clause or Apache-2.0"
"__email__", __copyright__ = "2014-2019 %s" % __author__
"__license__",
"__copyright__",
]
@ -0,0 +1,108 @@
"""
ELF file parser.
This provides a class ``ELFFile`` that parses an ELF executable in a similar
interface to ``ZipFile``. Only the read interface is implemented.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
import enum
import os
import struct
from typing import IO, Optional, Tuple
class ELFInvalid(ValueError):
pass
class EIClass(enum.IntEnum):
C32 = 1
C64 = 2
class EIData(enum.IntEnum):
Lsb = 1
Msb = 2
class EMachine(enum.IntEnum):
I386 = 3
S390 = 22
Arm = 40
X8664 = 62
AArc64 = 183
class ELFFile:
"""
Representation of an ELF executable.
"""
def __init__(self, f: IO[bytes]) -> None:
self._f = f
try:
ident = self._read("16B")
except struct.error:
raise ELFInvalid("unable to parse identification")
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or "
f"encoding ({self.encoding})"
)
try:
(
_,
self.machine, # Architecture type.
_,
_,
self._e_phoff, # Offset of program header.
_,
self.flags, # Processor-specific flags.
_,
self._e_phentsize, # Size of section.
self._e_phnum, # Number of sections.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
def _read(self, fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> Optional[str]:
"""
The path recorded in the ``PT_INTERP`` section header.
"""
for index in range(self._e_phnum):
self._f.seek(self._e_phoff + self._e_phentsize * index)
try:
data = self._read(self._p_fmt)
except struct.error:
continue
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return None
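Illustrative use of the class above on a Linux CPython build; the printed values are typical examples, not captured output, and on non-ELF platforms the constructor raises ELFInvalid instead.

import sys

with open(sys.executable, "rb") as f:
    elf = ELFFile(f)
    print(elf.capacity, elf.encoding, elf.machine)  # e.g. 2 1 62 on x86-64
    print(elf.interpreter)  # e.g. /lib64/ld-linux-x86-64.so.2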
@ -1,121 +1,58 @@
import collections import collections
import contextlib
import functools import functools
import os import os
import re import re
import struct
import sys import sys
import warnings import warnings
from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
from ._elffile import EIClass, EIData, ELFFile, EMachine
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
# Python does not provide platform information at sufficient granularity to @contextlib.contextmanager
# identify the architecture of the running executable in some cases, so we def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
# determine it dynamically by reading the information from the running
# process. This only applies on Linux, which uses the ELF format.
class _ELFFileHeader:
# https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
class _InvalidELFFileHeader(ValueError):
"""
An invalid ELF file header was found.
"""
ELF_MAGIC_NUMBER = 0x7F454C46
ELFCLASS32 = 1
ELFCLASS64 = 2
ELFDATA2LSB = 1
ELFDATA2MSB = 2
EM_386 = 3
EM_S390 = 22
EM_ARM = 40
EM_X86_64 = 62
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
def __init__(self, file: IO[bytes]) -> None:
def unpack(fmt: str) -> int:
try:
data = file.read(struct.calcsize(fmt))
result: Tuple[int, ...] = struct.unpack(fmt, data)
except struct.error:
raise _ELFFileHeader._InvalidELFFileHeader()
return result[0]
self.e_ident_magic = unpack(">I")
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_class = unpack("B")
if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_data = unpack("B")
if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
raise _ELFFileHeader._InvalidELFFileHeader()
self.e_ident_version = unpack("B")
self.e_ident_osabi = unpack("B")
self.e_ident_abiversion = unpack("B")
self.e_ident_pad = file.read(7)
format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
self.e_type = unpack(format_h)
self.e_machine = unpack(format_h)
self.e_version = unpack(format_i)
self.e_entry = unpack(format_p)
self.e_phoff = unpack(format_p)
self.e_shoff = unpack(format_p)
self.e_flags = unpack(format_i)
self.e_ehsize = unpack(format_h)
self.e_phentsize = unpack(format_h)
self.e_phnum = unpack(format_h)
self.e_shentsize = unpack(format_h)
self.e_shnum = unpack(format_h)
self.e_shstrndx = unpack(format_h)
def _get_elf_header() -> Optional[_ELFFileHeader]:
try: try:
with open(sys.executable, "rb") as f: with open(path, "rb") as f:
elf_header = _ELFFileHeader(f) yield ELFFile(f)
except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): except (OSError, TypeError, ValueError):
return None yield None
return elf_header
def _is_linux_armhf() -> bool: def _is_linux_armhf(executable: str) -> bool:
# hard-float ABI can be detected from the ELF header of the running # hard-float ABI can be detected from the ELF header of the running
# process # process
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
elf_header = _get_elf_header() with _parse_elf(executable) as f:
if elf_header is None: return (
return False f is not None
result = elf_header.e_ident_class == elf_header.ELFCLASS32 and f.capacity == EIClass.C32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB and f.encoding == EIData.Lsb
result &= elf_header.e_machine == elf_header.EM_ARM and f.machine == EMachine.Arm
result &= ( and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
elf_header.e_flags & elf_header.EF_ARM_ABIMASK and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
) == elf_header.EF_ARM_ABI_VER5 )
result &= (
elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
) == elf_header.EF_ARM_ABI_FLOAT_HARD
return result
def _is_linux_i686() -> bool: def _is_linux_i686(executable: str) -> bool:
elf_header = _get_elf_header() with _parse_elf(executable) as f:
if elf_header is None: return (
return False f is not None
result = elf_header.e_ident_class == elf_header.ELFCLASS32 and f.capacity == EIClass.C32
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB and f.encoding == EIData.Lsb
result &= elf_header.e_machine == elf_header.EM_386 and f.machine == EMachine.I386
return result )
def _have_compatible_abi(arch: str) -> bool: def _have_compatible_abi(executable: str, arch: str) -> bool:
if arch == "armv7l": if arch == "armv7l":
return _is_linux_armhf() return _is_linux_armhf(executable)
if arch == "i686": if arch == "i686":
return _is_linux_i686() return _is_linux_i686(executable)
return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
@ -141,10 +78,10 @@ def _glibc_version_string_confstr() -> Optional[str]:
# platform module. # platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try: try:
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". # Should be a string like "glibc 2.17".
version_string = os.confstr("CS_GNU_LIBC_VERSION") version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
assert version_string is not None assert version_string is not None
_, version = version_string.split() _, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError): except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None return None
@ -211,8 +148,8 @@ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m: if not m:
warnings.warn( warnings.warn(
"Expected glibc version with 2 components major.minor," f"Expected glibc version with 2 components major.minor,"
" got: %s" % version_str, f" got: {version_str}",
RuntimeWarning, RuntimeWarning,
) )
return -1, -1 return -1, -1
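A sketch of the parse behaviour above, with hypothetical version strings; the regex keeps only the leading major.minor pair and degrades to (-1, -1) with a RuntimeWarning on anything unparseable.

assert _parse_glibc_version("2.17") == (2, 17)
assert _parse_glibc_version("2.17-2014.11") == (2, 17)  # trailing suffix ignored
assert _parse_glibc_version("broken") == (-1, -1)       # warns, then degrades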
@ -265,7 +202,7 @@ _LEGACY_MANYLINUX_MAP = {
def platform_tags(linux: str, arch: str) -> Iterator[str]: def platform_tags(linux: str, arch: str) -> Iterator[str]:
if not _have_compatible_abi(arch): if not _have_compatible_abi(sys.executable, arch):
return return
# Oldest glibc to be supported regardless of architecture is (2, 17). # Oldest glibc to be supported regardless of architecture is (2, 17).
too_old_glibc2 = _GLibCVersion(2, 16) too_old_glibc2 = _GLibCVersion(2, 16)
@ -4,68 +4,13 @@ This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used. linked against musl, and what musl version is used.
""" """
import contextlib
import functools import functools
import operator
import os
import re import re
import struct
import subprocess import subprocess
import sys import sys
from typing import IO, Iterator, NamedTuple, Optional, Tuple from typing import Iterator, NamedTuple, Optional
from ._elffile import ELFFile
def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
"""Detect musl libc location by parsing the Python executable.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
f.seek(0)
try:
ident = _read_unpacked(f, "16B")
except struct.error:
return None
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
return None
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, p_fmt, p_idx = {
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
}[ident[4]]
except KeyError:
return None
else:
p_get = operator.itemgetter(*p_idx)
# Find the interpreter section and return its content.
try:
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
except struct.error:
return None
for i in range(e_phnum + 1):
f.seek(e_phoff + e_phentsize * i)
try:
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
except struct.error:
return None
if p_type != 3: # Not PT_INTERP.
continue
f.seek(p_offset)
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
if "musl" not in interpreter:
return None
return interpreter
return None
class _MuslVersion(NamedTuple): class _MuslVersion(NamedTuple):
@ -95,13 +40,12 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
Version 1.2.2 Version 1.2.2
Dynamic Program Loader Dynamic Program Loader
""" """
with contextlib.ExitStack() as stack: try:
try: with open(executable, "rb") as f:
f = stack.enter_context(open(executable, "rb")) ld = ELFFile(f).interpreter
except OSError: except (OSError, TypeError, ValueError):
return None return None
ld = _parse_ld_musl_from_elf(f) if ld is None or "musl" not in ld:
if not ld:
return None return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
return _parse_musl_version(proc.stderr) return _parse_musl_version(proc.stderr)
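An illustrative call of the rewritten probe; the result depends on the interpreter it is pointed at, roughly None on glibc systems and something like _MuslVersion(major=1, minor=2) on a musl-based one.

import sys

print(_get_musl_version(sys.executable))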
@ -0,0 +1,328 @@
"""Handwritten parser of dependency specifiers.
The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
"""
import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
class Node:
def __init__(self, value: str) -> None:
self.value = value
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"<{self.__class__.__name__}('{self}')>"
def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
# mypy does not support recursive type definition
# https://github.com/python/mypy/issues/731
MarkerAtom = Any
MarkerList = List[Any]
class ParsedRequirement(NamedTuple):
name: str
url: str
extras: List[str]
specifier: str
marker: Optional[MarkerList]
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
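An illustrative round-trip through this parser; the printed values are what the grammar above implies for a hypothetical requirement string, not output captured from this diff.

parsed = parse_requirement('name[extra1,extra2]>=1.0; python_version >= "3.8"')
print(parsed.name)       # 'name'
print(parsed.extras)     # ['extra1', 'extra2']
print(parsed.specifier)  # '>=1.0'
print(parsed.marker)     # [(<Variable('python_version')>, <Op('>=')>, <Value('3.8')>)]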
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
if specifier
else "name and no valid version specifier"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens("LEFT_BRACKET", "RIGHT_BRACKET"):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: List[str] = []
if not tokenizer.check("IDENTIFIER"):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
break
tokenizer.read()
tokenizer.consume("WS")
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extras.append(extra_token.text)
return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
"""
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
tokenizer.consume("WS")
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
"""
marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
tokenizer.consume("WS")
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
)
def process_env_var(env_var: str) -> Variable:
if (
env_var == "platform_python_implementation"
or env_var == "python_implementation"
):
return Variable("platform_python_implementation")
else:
return Variable(env_var)
def process_python_str(python_str: str) -> Value:
value = ast.literal_eval(python_str)
return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of "
"<=, <, !=, ==, >=, >, ~=, ===, in, not in"
)
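A sketch of the list structure the marker parser above produces for a hypothetical input; compare the recursive MarkerList shape noted near the top of this file.

print(parse_marker('os_name == "posix" or os_name == "nt"'))
# [(<Variable('os_name')>, <Op('==')>, <Value('posix')>), 'or',
#  (<Variable('os_name')>, <Op('==')>, <Value('nt')>)]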
@ -0,0 +1,188 @@
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
from .specifiers import Specifier
@dataclass
class Token:
name: str
text: str
position: int
class ParserSyntaxError(Exception):
"""The provided source text could not be parsed correctly."""
def __init__(
self,
message: str,
*,
source: str,
span: Tuple[int, int],
) -> None:
self.span = span
self.message = message
self.source = source
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
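A sketch of the rendered error with hypothetical values; the span controls where the ~/^ marker lands under the echoed source line.

err = ParserSyntaxError("Expected semicolon", source="name >=", span=(5, 7))
print(err)  # the message, then the source line, then "     ~~^" beneath it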
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The token is read and returned on success.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
@contextlib.contextmanager
def enclosing_tokens(self, open_token: str, close_token: str) -> Iterator[bool]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield open_position is not None
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected closing {close_token}",
span_start=open_position,
)
self.read()
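A minimal drive of the tokenizer, assuming the names defined in this file; note that expect() both checks and consumes the token on success.

tok = Tokenizer("name >=1.0", rules=DEFAULT_RULES)
name = tok.expect("IDENTIFIER", expected="package name")
tok.consume("WS")
spec = tok.expect("SPECIFIER", expected="version specifier")
tok.expect("END", expected="end of input")
print(name.text, spec.text)  # name >=1.0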
@ -8,19 +8,10 @@ import platform
import sys import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from pkg_resources.extern.pyparsing import ( # noqa: N817 from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker
Forward, from ._tokenizer import ParserSyntaxError
Group,
Literal as L,
ParseException,
ParseResults,
QuotedString,
ZeroOrMore,
stringEnd,
stringStart,
)
from .specifiers import InvalidSpecifier, Specifier from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
__all__ = [ __all__ = [
"InvalidMarker", "InvalidMarker",
@ -52,101 +43,24 @@ class UndefinedEnvironmentName(ValueError):
""" """
class Node: def _normalize_extra_values(results: Any) -> Any:
def __init__(self, value: Any) -> None: """
self.value = value Normalize extra values.
"""
def __str__(self) -> str: if isinstance(results[0], tuple):
return str(self.value) lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
def __repr__(self) -> str: normalized_extra = canonicalize_name(rhs.value)
return f"<{self.__class__.__name__}('{self}')>" rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
def serialize(self) -> str: normalized_extra = canonicalize_name(lhs.value)
raise NotImplementedError lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name") # PEP-345
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # undocumented setuptools legacy
| L("extra") # PEP-508
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker( def _format_marker(
marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str: ) -> str:
assert isinstance(marker, (list, tuple, str)) assert isinstance(marker, (list, tuple, str))
@ -192,7 +106,7 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
except InvalidSpecifier: except InvalidSpecifier:
pass pass
else: else:
return spec.contains(lhs) return spec.contains(lhs, prereleases=True)
oper: Optional[Operator] = _operators.get(op.serialize()) oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None: if oper is None:
@ -201,25 +115,19 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
return oper(lhs, rhs) return oper(lhs, rhs)
class Undefined: def _normalize(*values: str, key: str) -> Tuple[str, ...]:
pass # PEP 685 Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
_undefined = Undefined() def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
def _get_env(environment: Dict[str, str], name: str) -> str:
value: Union[str, Undefined] = environment.get(name, _undefined)
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
f"{name!r} does not exist in evaluation environment."
)
return value
def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]] groups: List[List[bool]] = [[]]
for marker in markers: for marker in markers:
@ -231,12 +139,15 @@ def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
lhs, op, rhs = marker lhs, op, rhs = marker
if isinstance(lhs, Variable): if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value) environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value rhs_value = rhs.value
else: else:
lhs_value = lhs.value lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value) environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value)) groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else: else:
assert marker in ["and", "or"] assert marker in ["and", "or"]
@ -274,13 +185,29 @@ def default_environment() -> Dict[str, str]:
class Marker: class Marker:
def __init__(self, marker: str) -> None: def __init__(self, marker: str) -> None:
# Note: We create a Marker object without calling this constructor in
# packaging.requirements.Requirement. If any additional logic is
# added here, make sure to mirror/adapt Requirement.
try: try:
self._markers = _coerce_parse_result(MARKER.parseString(marker)) self._markers = _normalize_extra_values(parse_marker(marker))
except ParseException as e: # The attribute `_markers` can be described in terms of a recursive type:
raise InvalidMarker( # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
f"Invalid marker: {marker!r}, parse error at " #
f"{marker[e.loc : e.loc + 8]!r}" # For example, the following expression:
) # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
#
# is parsed into:
# [
# (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
# 'and',
# [
# (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
# 'or',
# (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
# ]
# ]
except ParserSyntaxError as e:
raise InvalidMarker(str(e)) from e
def __str__(self) -> str: def __str__(self) -> str:
return _format_marker(self._markers) return _format_marker(self._markers)
@ -288,6 +215,15 @@ class Marker:
def __repr__(self) -> str: def __repr__(self) -> str:
return f"<Marker('{self}')>" return f"<Marker('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Marker):
return NotImplemented
return str(self) == str(other)
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker. """Evaluate a marker.
@ -298,7 +234,12 @@ class Marker:
The environment is determined from the current Python process. The environment is determined from the current Python process.
""" """
current_environment = default_environment() current_environment = default_environment()
current_environment["extra"] = ""
if environment is not None: if environment is not None:
current_environment.update(environment) current_environment.update(environment)
# The API used to allow setting extra to None. We need to handle this
# case for backwards compatibility.
if current_environment["extra"] is None:
current_environment["extra"] = ""
return _evaluate_markers(self._markers, current_environment) return _evaluate_markers(self._markers, current_environment)
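An illustrative evaluation showing the new default of extra == "" and the environment override path; the marker string is hypothetical and the second result assumes a Python 3.8+ interpreter.

m = Marker('python_version >= "3.8" and extra == "socks"')
print(m.evaluate())                    # False: the default environment has extra == ""
print(m.evaluate({"extra": "socks"}))  # True on Python 3.8+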
@ -2,26 +2,13 @@
# 2.0, and the BSD License. See the LICENSE file in the root of this repository # 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details. # for complete details.
import re
import string
import urllib.parse import urllib.parse
from typing import List, Optional as TOptional, Set from typing import Any, List, Optional, Set
from pkg_resources.extern.pyparsing import ( # noqa from ._parser import parse_requirement
Combine, from ._tokenizer import ParserSyntaxError
Literal as L, from .markers import Marker, _normalize_extra_values
Optional, from .specifiers import SpecifierSet
ParseException,
Regex,
Word,
ZeroOrMore,
originalTextFor,
stringEnd,
stringStart,
)
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError): class InvalidRequirement(ValueError):
@ -30,60 +17,6 @@ class InvalidRequirement(ValueError):
""" """
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r"[^ ]+")("url")
URL = AT + URI
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(
VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
)("_raw_spec")
_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
# issue #104
REQUIREMENT.parseString("x[]")
class Requirement: class Requirement:
"""Parse a requirement. """Parse a requirement.
@ -99,28 +32,29 @@ class Requirement:
def __init__(self, requirement_string: str) -> None: def __init__(self, requirement_string: str) -> None:
try: try:
req = REQUIREMENT.parseString(requirement_string) parsed = parse_requirement(requirement_string)
except ParseException as e: except ParserSyntaxError as e:
raise InvalidRequirement( raise InvalidRequirement(str(e)) from e
f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
)
self.name: str = req.name self.name: str = parsed.name
if req.url: if parsed.url:
parsed_url = urllib.parse.urlparse(req.url) parsed_url = urllib.parse.urlparse(parsed.url)
if parsed_url.scheme == "file": if parsed_url.scheme == "file":
if urllib.parse.urlunparse(parsed_url) != req.url: if urllib.parse.urlunparse(parsed_url) != parsed.url:
raise InvalidRequirement("Invalid URL given") raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or ( elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc not parsed_url.scheme and not parsed_url.netloc
): ):
raise InvalidRequirement(f"Invalid URL: {req.url}") raise InvalidRequirement(f"Invalid URL: {parsed.url}")
self.url: TOptional[str] = req.url self.url: Optional[str] = parsed.url
else: else:
self.url = None self.url = None
self.extras: Set[str] = set(req.extras.asList() if req.extras else []) self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
self.specifier: SpecifierSet = SpecifierSet(req.specifier) self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: TOptional[Marker] = req.marker if req.marker else None self.marker: Optional[Marker] = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
def __str__(self) -> str: def __str__(self) -> str:
parts: List[str] = [self.name] parts: List[str] = [self.name]
@ -144,3 +78,18 @@ class Requirement:
def __repr__(self) -> str: def __repr__(self) -> str:
return f"<Requirement('{self}')>" return f"<Requirement('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Requirement):
return NotImplemented
return (
self.name == other.name
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
)
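An illustrative use of the rewritten class with a hypothetical requirement string; the final line exercises the newly added __eq__.

req = Requirement('requests[security]>=2.8.1; python_version < "4"')
print(req.name, sorted(req.extras))  # requests ['security']
print(str(req.specifier))            # >=2.8.1
print(req.marker)                    # python_version < "4"
print(req == Requirement(str(req)))  # True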
@@ -4,6 +4,7 @@

 import logging
 import platform
+import subprocess
 import sys
 import sysconfig
 from importlib.machinery import EXTENSION_SUFFIXES
@@ -36,7 +37,7 @@ INTERPRETER_SHORT_NAMES: Dict[str, str] = {
 }


-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+_32_BIT_INTERPRETER = sys.maxsize <= 2**32


 class Tag:
@@ -224,10 +225,45 @@ def cpython_tags(
             yield Tag(interpreter, "abi3", platform_)


-def _generic_abi() -> Iterator[str]:
-    abi = sysconfig.get_config_var("SOABI")
-    if abi:
-        yield _normalize_string(abi)
+def _generic_abi() -> List[str]:
+    """
+    Return the ABI tag based on EXT_SUFFIX.
+    """
+    # The following are examples of `EXT_SUFFIX`.
+    # We want to keep the parts which are related to the ABI and remove the
+    # parts which are related to the platform:
+    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
+    # - mac:     '.cpython-310-darwin.so'           => cp310
+    # - win:     '.cp310-win_amd64.pyd'             => cp310
+    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
+    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
+    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
+    #            => graalpy_38_native
+    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
+    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
+        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
+    parts = ext_suffix.split(".")
+    if len(parts) < 3:
+        # CPython3.7 and earlier uses ".pyd" on Windows.
+        return _cpython_abis(sys.version_info[:2])
+    soabi = parts[1]
+    if soabi.startswith("cpython"):
+        # non-windows
+        abi = "cp" + soabi.split("-")[1]
+    elif soabi.startswith("cp"):
+        # windows
+        abi = soabi.split("-")[0]
+    elif soabi.startswith("pypy"):
+        abi = "-".join(soabi.split("-")[:2])
+    elif soabi.startswith("graalpy"):
+        abi = "-".join(soabi.split("-")[:3])
+    elif soabi:
+        # pyston, ironpython, others?
+        abi = soabi
+    else:
+        return []
+    return [_normalize_string(abi)]
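To see what feeds this logic on a given machine, a small sketch; the suffix in the comment is typical of Linux CPython 3.10, and the reduction shown covers only the common `cpython-*` case:

import sysconfig

ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
print(ext_suffix)  # e.g. '.cpython-310-x86_64-linux-gnu.so'

soabi = ext_suffix.split(".")[1]
if soabi.startswith("cpython"):
    print("cp" + soabi.split("-")[1])  # e.g. 'cp310'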

 def generic_tags(
@@ -251,8 +287,9 @@ def generic_tags(
     interpreter = "".join([interp_name, interp_version])
     if abis is None:
         abis = _generic_abi()
+    else:
+        abis = list(abis)
     platforms = list(platforms or platform_tags())
-    abis = list(abis)
     if "none" not in abis:
         abis.append("none")
     for abi in abis:
@@ -356,6 +393,22 @@ def mac_platforms(
         version_str, _, cpu_arch = platform.mac_ver()
     if version is None:
         version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+        if version == (10, 16):
+            # When built against an older macOS SDK, Python will report macOS 10.16
+            # instead of the real version.
+            version_str = subprocess.run(
+                [
+                    sys.executable,
+                    "-sS",
+                    "-c",
+                    "import platform; print(platform.mac_ver()[0])",
+                ],
+                check=True,
+                env={"SYSTEM_VERSION_COMPAT": "0"},
+                stdout=subprocess.PIPE,
+                universal_newlines=True,
+            ).stdout
+            version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
     else:
         version = version
     if arch is None:
@@ -446,6 +499,9 @@ def platform_tags() -> Iterator[str]:

 def interpreter_name() -> str:
     """
     Returns the name of the running interpreter.
+
+    Some implementations have a reserved, two-letter abbreviation which will
+    be returned when appropriate.
     """
     name = sys.implementation.name
     return INTERPRETER_SHORT_NAMES.get(name) or name
@@ -482,6 +538,9 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
     yield from generic_tags()

     if interp_name == "pp":
-        yield from compatible_tags(interpreter="pp3")
+        interp = "pp3"
+    elif interp_name == "cp":
+        interp = "cp" + interpreter_version(warn=warn)
     else:
-        yield from compatible_tags()
+        interp = None
+    yield from compatible_tags(interpreter=interp)
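A quick way to observe the retagged flow, again assuming the stand-alone `packaging` distribution; the tags printed vary by platform and Python build:

from packaging import tags

# The first tags yielded are the most preferred for this interpreter,
# e.g. cp310-cp310-manylinux_2_17_x86_64 on Linux CPython 3.10.
for tag in list(tags.sys_tags())[:3]:
    print(tag)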
@@ -35,7 +35,9 @@ def canonicalize_name(name: str) -> NormalizedName:
     return cast(NormalizedName, value)


-def canonicalize_version(version: Union[Version, str]) -> str:
+def canonicalize_version(
+    version: Union[Version, str], *, strip_trailing_zero: bool = True
+) -> str:
     """
     This is very similar to Version.__str__, but has one subtle difference
     with the way it handles the release segment.
@@ -56,8 +58,11 @@ def canonicalize_version(version: Union[Version, str]) -> str:
         parts.append(f"{parsed.epoch}!")

     # Release segment
-    # NB: This strips trailing '.0's to normalize
-    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
+    release_segment = ".".join(str(x) for x in parsed.release)
+    if strip_trailing_zero:
+        # NB: This strips trailing '.0's to normalize
+        release_segment = re.sub(r"(\.0)+$", "", release_segment)
+    parts.append(release_segment)

     # Pre-release
     if parsed.pre is not None:
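A sketch of the new keyword's effect, assuming the stand-alone `packaging` distribution:

from packaging.utils import canonicalize_version

print(canonicalize_version("1.2.0"))                             # '1.2'
print(canonicalize_version("1.2.0", strip_trailing_zero=False))  # '1.2.0'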
@@ -1,16 +1,20 @@
 # This file is dual licensed under the terms of the Apache License, Version
 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
 # for complete details.
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""

 import collections
 import itertools
 import re
-import warnings
-from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+from typing import Callable, Optional, SupportsInt, Tuple, Union

 from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType

-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]

 InfiniteTypes = Union[InfinityType, NegativeInfinityType]
 PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
@@ -29,36 +33,37 @@ LocalType = Union[
 CmpKey = Tuple[
     int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
 ]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[
-    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]

 _Version = collections.namedtuple(
     "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
 )


-def parse(version: str) -> Union["LegacyVersion", "Version"]:
-    """
-    Parse the given version string and return either a :class:`Version` object
-    or a :class:`LegacyVersion` object depending on if the given version is
-    a valid PEP 440 version or a legacy version.
-    """
-    try:
-        return Version(version)
-    except InvalidVersion:
-        return LegacyVersion(version)
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+
+    :raises InvalidVersion: When the version string is not a valid version.
+    """
+    return Version(version)


 class InvalidVersion(ValueError):
-    """
-    An invalid version was found, users should refer to PEP 440.
-    """
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """


 class _BaseVersion:
-    _key: Union[CmpKey, LegacyCmpKey]
+    _key: CmpKey

     def __hash__(self) -> int:
         return hash(self._key)
@@ -103,126 +108,9 @@ class _BaseVersion:
         return self._key != other._key


-class LegacyVersion(_BaseVersion):
-    def __init__(self, version: str) -> None:
-        self._version = str(version)
-        self._key = _legacy_cmpkey(self._version)
-
-        warnings.warn(
-            "Creating a LegacyVersion has been deprecated and will be "
-            "removed in the next major release",
-            DeprecationWarning,
-        )
-
-    def __str__(self) -> str:
-        return self._version
-
-    def __repr__(self) -> str:
-        return f"<LegacyVersion('{self}')>"
-
-    @property
-    def public(self) -> str:
-        return self._version
-
-    @property
-    def base_version(self) -> str:
-        return self._version
-
-    @property
-    def epoch(self) -> int:
-        return -1
-
-    @property
-    def release(self) -> None:
-        return None
-
-    @property
-    def pre(self) -> None:
-        return None
-
-    @property
-    def post(self) -> None:
-        return None
-
-    @property
-    def dev(self) -> None:
-        return None
-
-    @property
-    def local(self) -> None:
-        return None
-
-    @property
-    def is_prerelease(self) -> bool:
-        return False
-
-    @property
-    def is_postrelease(self) -> bool:
-        return False
-
-    @property
-    def is_devrelease(self) -> bool:
-        return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
-    "pre": "c",
-    "preview": "c",
-    "-": "final-",
-    "rc": "c",
-    "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
-    for part in _legacy_version_component_re.split(s):
-        part = _legacy_version_replacement_map.get(part, part)
-
-        if not part or part == ".":
-            continue
-
-        if part[:1] in "0123456789":
-            # pad for numeric comparison
-            yield part.zfill(8)
-        else:
-            yield "*" + part
-
-    # ensure that alpha/beta/candidate are before final
-    yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
-    # greater than or equal to 0. This will effectively put the LegacyVersion,
-    # which uses the defacto standard originally implemented by setuptools,
-    # as before all PEP 440 versions.
-    epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version setuptools prior to
-    # it's adoption of the packaging library.
-    parts: List[str] = []
-    for part in _parse_version_parts(version.lower()):
-        if part.startswith("*"):
-            # remove "-" before a prerelease tag
-            if part < "*final":
-                while parts and parts[-1] == "*final-":
-                    parts.pop()
-
-            # remove trailing zeros from each series of numeric parts
-            while parts and parts[-1] == "00000000":
-                parts.pop()
-
-        parts.append(part)
-
-    return epoch, tuple(parts)
-
-
 # Deliberately not anchored to the start and end of the string, to make it
 # easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
+_VERSION_PATTERN = r"""
     v?
     (?:
         (?:(?P<epoch>[0-9]+)!)?                           # epoch
@@ -253,12 +141,55 @@ VERSION_PATTERN = r"""
     (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
 """

+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+

 class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """

     _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

     def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """

         # Validate the version and parse it into pieces
         match = self._regex.search(version)
@@ -288,9 +219,19 @@ class Version(_BaseVersion):
         )

     def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
         return f"<Version('{self}')>"

     def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
         parts = []

         # Epoch
@@ -320,29 +261,80 @@ class Version(_BaseVersion):

     @property
     def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
         _epoch: int = self._version.epoch
         return _epoch

     @property
     def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
         _release: Tuple[int, ...] = self._version.release
         return _release

     @property
     def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
         _pre: Optional[Tuple[str, int]] = self._version.pre
         return _pre

     @property
     def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
         return self._version.post[1] if self._version.post else None

     @property
     def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
         return self._version.dev[1] if self._version.dev else None

     @property
     def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
         if self._version.local:
             return ".".join(str(x) for x in self._version.local)
         else:
@@ -350,10 +342,31 @@ class Version(_BaseVersion):

     @property
     def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
         return str(self).split("+", 1)[0]

     @property
     def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
         parts = []

         # Epoch
@@ -367,26 +380,72 @@ class Version(_BaseVersion):

     @property
     def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
         return self.dev is not None or self.pre is not None

     @property
     def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
         return self.post is not None

     @property
     def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
         return self.dev is not None

     @property
     def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
         return self.release[0] if len(self.release) >= 1 else 0

     @property
     def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
         return self.release[1] if len(self.release) >= 2 else 0

     @property
     def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
         return self.release[2] if len(self.release) >= 3 else 0
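Two behaviours worth noting from this rewrite, sketched with the stand-alone `packaging` distribution (`mypkg` and the file name are made up): `parse` now raises instead of falling back to `LegacyVersion`, and `VERSION_PATTERN` is intended for embedding in larger expressions.

import re
from packaging.version import VERSION_PATTERN, InvalidVersion, parse

try:
    parse("french toast")
except InvalidVersion as exc:
    print(exc)  # Invalid version: 'french toast'

name_re = re.compile(
    r"^mypkg-(?P<ver>" + VERSION_PATTERN + r")\.tar\.gz$",
    re.VERBOSE | re.IGNORECASE,
)
print(name_re.match("mypkg-1.0rc2.tar.gz").group("ver"))  # '1.0rc2'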
@@ -0,0 +1,342 @@
"""
Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and
usage.
"""
from __future__ import annotations
import os
import sys
from pathlib import Path
if sys.version_info >= (3, 8): # pragma: no cover (py38+)
from typing import Literal
else: # pragma: no cover (py38+)
from ..typing_extensions import Literal
from .api import PlatformDirsABC
from .version import __version__
from .version import __version_tuple__ as __version_info__
def _set_platform_dir_class() -> type[PlatformDirsABC]:
if sys.platform == "win32":
from .windows import Windows as Result
elif sys.platform == "darwin":
from .macos import MacOS as Result
else:
from .unix import Unix as Result
if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
if os.getenv("SHELL") or os.getenv("PREFIX"):
return Result
from .android import _android_folder
if _android_folder() is not None:
from .android import Android
return Android # return to avoid redefinition of result
return Result
PlatformDirs = _set_platform_dir_class() #: Currently active platform
AppDirs = PlatformDirs #: Backwards compatibility with appdirs
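The selector above runs once at import time; a quick check of what it picked (output is platform-dependent):

import platformdirs

print(platformdirs.PlatformDirs)  # e.g. <class 'platformdirs.unix.Unix'> on Linux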
def user_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir
def site_data_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data directory shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir
def user_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir
def site_config_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config directory shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir
def user_cache_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir
def user_state_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir
def user_log_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir
def user_documents_dir() -> str:
"""
:returns: documents directory tied to the user
"""
return PlatformDirs().user_documents_dir
def user_runtime_dir(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> str:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime directory tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir
def user_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: data path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path
def site_data_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: data path shared by users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path
def user_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: config path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path
def site_config_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
multipath: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
:returns: config path shared by the users
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path
def user_cache_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: cache path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path
def user_state_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
:returns: state path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path
def user_log_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: log path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path
def user_documents_path() -> Path:
"""
:returns: documents path tied to the user
"""
return PlatformDirs().user_documents_path
def user_runtime_path(
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
opinion: bool = True,
) -> Path:
"""
:param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
:param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
:param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
:param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
:returns: runtime path tied to the user
"""
return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path
__all__ = [
"__version__",
"__version_info__",
"PlatformDirs",
"AppDirs",
"PlatformDirsABC",
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"user_documents_dir",
"user_runtime_dir",
"site_data_dir",
"site_config_dir",
"user_data_path",
"user_config_path",
"user_cache_path",
"user_state_path",
"user_log_path",
"user_documents_path",
"user_runtime_path",
"site_data_path",
"site_config_path",
]
@@ -0,0 +1,46 @@
from __future__ import annotations

from platformdirs import PlatformDirs, __version__

PROPS = (
    "user_data_dir",
    "user_config_dir",
    "user_cache_dir",
    "user_state_dir",
    "user_log_dir",
    "user_documents_dir",
    "user_runtime_dir",
    "site_data_dir",
    "site_config_dir",
)


def main() -> None:
    app_name = "MyApp"
    app_author = "MyCompany"

    print(f"-- platformdirs {__version__} --")

    print("-- app dirs (with optional 'version')")
    dirs = PlatformDirs(app_name, app_author, version="1.0")
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (without optional 'version')")
    dirs = PlatformDirs(app_name, app_author)
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (without optional 'appauthor')")
    dirs = PlatformDirs(app_name)
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (with disabled 'appauthor')")
    dirs = PlatformDirs(app_name, appauthor=False)
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")


if __name__ == "__main__":
    main()
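Run as `python -m platformdirs`, the first block prints paths along these lines on a typical Linux setup (illustrative only; every platform differs):

user_data_dir: /home/user/.local/share/MyApp/1.0
user_config_dir: /home/user/.config/MyApp/1.0
user_cache_dir: /home/user/.cache/MyApp/1.0
site_data_dir: /usr/local/share/MyApp/1.0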
@@ -0,0 +1,120 @@
from __future__ import annotations
import os
import re
import sys
from functools import lru_cache
from typing import cast
from .api import PlatformDirsABC
class Android(PlatformDirsABC):
"""
Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>` and
`version <platformdirs.api.PlatformDirsABC.version>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "files")
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
"""
return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `user_config_dir`"""
return self.user_config_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``
"""
return _android_documents_folder()
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "tmp")
return path
@lru_cache(maxsize=1)
def _android_folder() -> str | None:
""":return: base folder for the Android OS or None if it cannot be found"""
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exception:
# if that fails, look for an Android-style folder on sys.path
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
""":return: documents folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
except Exception:
documents_dir = "/storage/emulated/0/Documents"
return documents_dir
__all__ = [
"Android",
]
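The sys.path fallback above can be exercised anywhere; a minimal sketch with a made-up app path:

import re

pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
path = "/data/user/0/com.example.app/files"  # hypothetical app-private path
print(bool(pattern.match(path)))  # True
print(path.split("/files")[0])    # '/data/user/0/com.example.app'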
@@ -0,0 +1,156 @@
from __future__ import annotations
import os
import sys
from abc import ABC, abstractmethod
from pathlib import Path
if sys.version_info >= (3, 8): # pragma: no branch
from typing import Literal # pragma: no cover
class PlatformDirsABC(ABC):
"""
Abstract base class for platform directories.
"""
def __init__(
self,
appname: str | None = None,
appauthor: str | None | Literal[False] = None,
version: str | None = None,
roaming: bool = False,
multipath: bool = False,
opinion: bool = True,
):
"""
Create a new platform directory.
:param appname: See `appname`.
:param appauthor: See `appauthor`.
:param version: See `version`.
:param roaming: See `roaming`.
:param multipath: See `multipath`.
:param opinion: See `opinion`.
"""
self.appname = appname  #: The name of the application.
self.appauthor = appauthor
"""
The name of the app author or distributing body for this application. Typically, it is the owning company name.
Defaults to `appname`. You may pass ``False`` to disable it.
"""
self.version = version
"""
An optional version path element to append to the path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
"""
self.roaming = roaming
"""
Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
for roaming profiles, this user data will be synced on login (see
`here <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
"""
self.multipath = multipath
"""
An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be
returned. By default, only the first item is returned.
"""
self.opinion = opinion  #: A flag indicating whether to use opinionated values.
def _append_app_name_and_version(self, *base: str) -> str:
params = list(base[1:])
if self.appname:
params.append(self.appname)
if self.version:
params.append(self.version)
return os.path.join(base[0], *params)
@property
@abstractmethod
def user_data_dir(self) -> str:
""":return: data directory tied to the user"""
@property
@abstractmethod
def site_data_dir(self) -> str:
""":return: data directory shared by users"""
@property
@abstractmethod
def user_config_dir(self) -> str:
""":return: config directory tied to the user"""
@property
@abstractmethod
def site_config_dir(self) -> str:
""":return: config directory shared by the users"""
@property
@abstractmethod
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user"""
@property
@abstractmethod
def user_state_dir(self) -> str:
""":return: state directory tied to the user"""
@property
@abstractmethod
def user_log_dir(self) -> str:
""":return: log directory tied to the user"""
@property
@abstractmethod
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user"""
@property
@abstractmethod
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user"""
@property
def user_data_path(self) -> Path:
""":return: data path tied to the user"""
return Path(self.user_data_dir)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users"""
return Path(self.site_data_dir)
@property
def user_config_path(self) -> Path:
""":return: config path tied to the user"""
return Path(self.user_config_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users"""
return Path(self.site_config_dir)
@property
def user_cache_path(self) -> Path:
""":return: cache path tied to the user"""
return Path(self.user_cache_dir)
@property
def user_state_path(self) -> Path:
""":return: state path tied to the user"""
return Path(self.user_state_dir)
@property
def user_log_path(self) -> Path:
""":return: log path tied to the user"""
return Path(self.user_log_dir)
@property
def user_documents_path(self) -> Path:
""":return: documents path tied to the user"""
return Path(self.user_documents_dir)
@property
def user_runtime_path(self) -> Path:
""":return: runtime path tied to the user"""
return Path(self.user_runtime_dir)
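How the abstract pieces compose, as a minimal sketch; `MyApp` is a made-up name and the paths in the comments are examples only:

from platformdirs import PlatformDirs

dirs = PlatformDirs(appname="MyApp", version="1.0")
# appname and version are appended to the platform base directory,
# e.g. ~/.local/share/MyApp/1.0 on Linux.
print(dirs.user_data_dir)
print(dirs.user_data_path)  # the same location as a pathlib.Path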
@@ -0,0 +1,64 @@
from __future__ import annotations
import os
from .api import PlatformDirsABC
class MacOS(PlatformDirsABC):
"""
Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
<https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>` and
`version <platformdirs.api.PlatformDirsABC.version>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/"))
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version("/Library/Application Support")
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/"))
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``"""
return self._append_app_name_and_version("/Library/Preferences")
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
""":return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))
@property
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user, e.g. ``~/Documents``"""
return os.path.expanduser("~/Documents")
@property
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))
__all__ = [
"MacOS",
]
@@ -0,0 +1,181 @@
from __future__ import annotations
import os
import sys
from configparser import ConfigParser
from pathlib import Path
from .api import PlatformDirsABC
if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker
from os import getuid
else:
def getuid() -> int:
raise RuntimeError("should only be used on Linux")
class Unix(PlatformDirsABC):
"""
On Unix/Linux, we follow the
`XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec allows
overriding directories with environment variables. The examples shown are the default values, alongside the name of
the environment variable that overrides them. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`multipath <platformdirs.api.PlatformDirsABC.multipath>`,
`opinion <platformdirs.api.PlatformDirsABC.opinion>`.
"""
@property
def user_data_dir(self) -> str:
"""
:return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
``$XDG_DATA_HOME/$appname/$version``
"""
path = os.environ.get("XDG_DATA_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.local/share")
return self._append_app_name_and_version(path)
@property
def site_data_dir(self) -> str:
"""
:return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
enabled and ``XDG_DATA_DIRS`` is set and is a multi path, the response is also a multi path separated by the OS
path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
"""
# XDG default for $XDG_DATA_DIRS; only first, if multipath is False
path = os.environ.get("XDG_DATA_DIRS", "")
if not path.strip():
path = f"/usr/local/share{os.pathsep}/usr/share"
return self._with_multi_path(path)
def _with_multi_path(self, path: str) -> str:
path_list = path.split(os.pathsep)
if not self.multipath:
path_list = path_list[0:1]
path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
return os.pathsep.join(path_list)
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
``$XDG_CONFIG_HOME/$appname/$version``
"""
path = os.environ.get("XDG_CONFIG_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.config")
return self._append_app_name_and_version(path)
@property
def site_config_dir(self) -> str:
"""
:return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
is enabled and ``XDG_CONFIG_DIRS`` is set and is a multi path, the response is also a multi path separated by the OS
path separator), e.g. ``/etc/xdg/$appname/$version``
"""
# XDG default for $XDG_CONFIG_DIRS; only first, if multipath is False
path = os.environ.get("XDG_CONFIG_DIRS", "")
if not path.strip():
path = "/etc/xdg"
return self._with_multi_path(path)
@property
def user_cache_dir(self) -> str:
"""
:return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
``$XDG_CACHE_HOME/$appname/$version``
"""
path = os.environ.get("XDG_CACHE_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.cache")
return self._append_app_name_and_version(path)
@property
def user_state_dir(self) -> str:
"""
:return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
``$XDG_STATE_HOME/$appname/$version``
"""
path = os.environ.get("XDG_STATE_HOME", "")
if not path.strip():
path = os.path.expanduser("~/.local/state")
return self._append_app_name_and_version(path)
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it
"""
path = self.user_state_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user, e.g. ``~/Documents``
"""
documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
if documents_dir is None:
documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
if not documents_dir:
documents_dir = os.path.expanduser("~/Documents")
return documents_dir
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
``$XDG_RUNTIME_DIR/$appname/$version``
"""
path = os.environ.get("XDG_RUNTIME_DIR", "")
if not path.strip():
path = f"/run/user/{getuid()}"
return self._append_app_name_and_version(path)
@property
def site_data_path(self) -> Path:
""":return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_data_dir)
@property
def site_config_path(self) -> Path:
""":return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_config_dir)
def _first_item_as_path_if_multipath(self, directory: str) -> Path:
if self.multipath:
# If multipath is True, only the first path is returned.
directory = directory.split(os.pathsep)[0]
return Path(directory)
def _get_user_dirs_folder(key: str) -> str | None:
"""Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/"""
user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs")
if os.path.exists(user_dirs_config_path):
parser = ConfigParser()
with open(user_dirs_config_path) as stream:
# Add fake section header, so ConfigParser doesn't complain
parser.read_string(f"[top]\n{stream.read()}")
if key not in parser["top"]:
return None
path = parser["top"][key].strip('"')
# Handle relative home paths
path = path.replace("$HOME", os.path.expanduser("~"))
return path
return None
__all__ = [
"Unix",
]
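A sketch of the XDG overrides in action; the directories are made up:

import os

from platformdirs.unix import Unix

os.environ["XDG_DATA_HOME"] = "/tmp/xdg-data"
print(Unix(appname="MyApp").user_data_dir)  # /tmp/xdg-data/MyApp

os.environ["XDG_DATA_DIRS"] = f"/opt/share{os.pathsep}/srv/share"
print(Unix(appname="MyApp", multipath=True).site_data_dir)
# /opt/share/MyApp:/srv/share/MyApp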
@@ -0,0 +1,4 @@
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '2.6.2'
__version_tuple__ = version_tuple = (2, 6, 2)
@@ -0,0 +1,184 @@
from __future__ import annotations
import ctypes
import os
import sys
from functools import lru_cache
from typing import Callable
from .api import PlatformDirsABC
class Windows(PlatformDirsABC):
"""`MSDN on where to store app data files
<http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`roaming <platformdirs.api.PlatformDirsABC.roaming>`,
`opinion <platformdirs.api.PlatformDirsABC.opinion>`."""
@property
def user_data_dir(self) -> str:
"""
:return: data directory tied to the user, e.g.
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
"""
const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(get_win_folder(const))
return self._append_parts(path)
def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
params = []
if self.appname:
if self.appauthor is not False:
author = self.appauthor or self.appname
params.append(author)
params.append(self.appname)
if opinion_value is not None and self.opinion:
params.append(opinion_value)
if self.version:
params.append(self.version)
return os.path.join(path, *params)
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
return self._append_parts(path)
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `site_data_dir`"""
return self.site_data_dir
@property
def user_cache_dir(self) -> str:
"""
:return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.
``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
"""
path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
return self._append_parts(path, opinion_value="Cache")
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it
"""
path = self.user_data_dir
if self.opinion:
path = os.path.join(path, "Logs")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``
"""
return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, e.g.
``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
"""
path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
return self._append_parts(path)
def get_win_folder_from_env_vars(csidl_name: str) -> str:
"""Get folder from environment variables."""
if csidl_name == "CSIDL_PERSONAL": # does not have an environment name
return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")
env_var_name = {
"CSIDL_APPDATA": "APPDATA",
"CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
"CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
}.get(csidl_name)
if env_var_name is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
result = os.environ.get(env_var_name)
if result is None:
raise ValueError(f"Unset environment variable: {env_var_name}")
return result
def get_win_folder_from_registry(csidl_name: str) -> str:
"""Get folder from the registry.
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
"CSIDL_PERSONAL": "Personal",
}.get(csidl_name)
if shell_folder_name is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows
raise NotImplementedError
import winreg
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
directory, _ = winreg.QueryValueEx(key, shell_folder_name)
return str(directory)
def get_win_folder_via_ctypes(csidl_name: str) -> str:
"""Get folder with ctypes."""
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
"CSIDL_PERSONAL": 5,
}.get(csidl_name)
if csidl_const is None:
raise ValueError(f"Unknown CSIDL name: {csidl_name}")
buf = ctypes.create_unicode_buffer(1024)
windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker
windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if it has highbit chars.
if any(ord(c) > 255 for c in buf):
buf2 = ctypes.create_unicode_buffer(1024)
if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _pick_get_win_folder() -> Callable[[str], str]:
if hasattr(ctypes, "windll"):
return get_win_folder_via_ctypes
try:
import winreg # noqa: F401
except ImportError:
return get_win_folder_from_env_vars
else:
return get_win_folder_from_registry
get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())
__all__ = [
"Windows",
]
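On a Windows box the resolver above prefers ctypes, then the registry, then environment variables; a minimal sketch (the account name in the comment is made up):

from platformdirs.windows import Windows

dirs = Windows(appname="MyApp", appauthor="MyCompany")
print(dirs.user_data_dir)   # e.g. C:\Users\me\AppData\Local\MyCompany\MyApp
print(dirs.user_cache_dir)  # e.g. C:\Users\me\AppData\Local\MyCompany\MyApp\Cache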
@@ -1,331 +0,0 @@
# module pyparsing.py
#
# Copyright (c) 2003-2022 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print(hello, "->", greet.parse_string(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of :class:`'+'<And>`,
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
from typing import NamedTuple
class version_info(NamedTuple):
major: int
minor: int
micro: int
releaselevel: str
serial: int
@property
def __version__(self):
return (
"{}.{}.{}".format(self.major, self.minor, self.micro)
+ (
"{}{}{}".format(
"r" if self.releaselevel[0] == "c" else "",
self.releaselevel[0],
self.serial,
),
"",
)[self.releaselevel == "final"]
)
def __str__(self):
return "{} {} / {}".format(__name__, self.__version__, __version_time__)
def __repr__(self):
return "{}.{}({})".format(
__name__,
type(self).__name__,
", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
)
__version_info__ = version_info(3, 0, 9, "final", 0)
__version_time__ = "05 May 2022 07:02 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
from .util import *
from .exceptions import *
from .actions import *
from .core import __diag__, __compat__
from .results import *
from .core import *
from .core import _builtin_exprs as core_builtin_exprs
from .helpers import *
from .helpers import _builtin_exprs as helper_builtin_exprs
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
from .testing import pyparsing_test as testing
from .common import (
pyparsing_common as common,
_builtin_exprs as common_builtin_exprs,
)
# define backward compat synonyms
if "pyparsing_unicode" not in globals():
pyparsing_unicode = unicode
if "pyparsing_common" not in globals():
pyparsing_common = common
if "pyparsing_test" not in globals():
pyparsing_test = testing
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
__all__ = [
"__version__",
"__version_time__",
"__author__",
"__compat__",
"__diag__",
"And",
"AtLineStart",
"AtStringStart",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"IndentedBlock",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"Located",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"OpAssoc",
"Opt",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"PositionToken",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"any_close_tag",
"any_open_tag",
"c_style_comment",
"col",
"common_html_entity",
"counted_array",
"cpp_style_comment",
"dbl_quoted_string",
"dbl_slash_comment",
"delimited_list",
"dict_of",
"empty",
"hexnums",
"html_comment",
"identchars",
"identbodychars",
"java_style_comment",
"line",
"line_end",
"line_start",
"lineno",
"make_html_tags",
"make_xml_tags",
"match_only_at_col",
"match_previous_expr",
"match_previous_literal",
"nested_expr",
"null_debug_action",
"nums",
"one_of",
"printables",
"punc8bit",
"python_style_comment",
"quoted_string",
"remove_quotes",
"replace_with",
"replace_html_entity",
"rest_of_line",
"sgl_quoted_string",
"srange",
"string_end",
"string_start",
"trace_parse_action",
"unicode_string",
"with_attribute",
"indentedBlock",
"original_text_for",
"ungroup",
"infix_notation",
"locatedExpr",
"with_class",
"CloseMatch",
"token_map",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"condition_as_parse_action",
"pyparsing_test",
# pre-PEP8 compatibility names
"__versionTime__",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"htmlComment",
"javaStyleComment",
"lineEnd",
"lineStart",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"oneOf",
"opAssoc",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"withAttribute",
"indentedBlock",
"originalTextFor",
"infixNotation",
"locatedExpr",
"withClass",
"tokenMap",
"conditionAsParseAction",
"autoname_elements",
]


@@ -1,207 +0,0 @@
# actions.py
from .exceptions import ParseException
from .util import col
class OnlyOnce:
"""
Wrapper for parse actions, to ensure they are only called once.
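Example (a minimal sketch; ``report`` is a hypothetical parse action)::
def report(s, l, t):
    print("matched:", t)
once = OnlyOnce(report)
expr = Word(nums).add_parse_action(once)
expr.parse_string("123")  # the action fires
expr.parse_string("456")  # fails until once.reset() is called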
"""
def __init__(self, method_call):
from .core import _trim_arity
self.callable = _trim_arity(method_call)
self.called = False
def __call__(self, s, l, t):
if not self.called:
results = self.callable(s, l, t)
self.called = True
return results
raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
def reset(self):
"""
Allow the associated parse action to be called once more.
"""
self.called = False
def match_only_at_col(n):
"""
Helper method for defining parse actions that require matching at
a specific column in the input text.
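Example (illustrative)::
# only accept an integer that begins in column 5 of its line
int_in_col5 = Word(nums).set_parse_action(match_only_at_col(5))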
"""
def verify_col(strg, locn, toks):
if col(locn, strg) != n:
raise ParseException(strg, locn, "matched token not at column {}".format(n))
return verify_col
def replace_with(repl_str):
"""
Helper method for common parse actions that simply return
a literal value. Especially useful when used with
:class:`transform_string<ParserElement.transform_string>` ().
Example::
num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
term = na | num
term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [repl_str]
def remove_quotes(s, l, t):
"""
Helper parse action for removing quotation marks from parsed
quoted strings.
Example::
# by default, quotation marks are included in parsed results
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use remove_quotes to strip quotation marks from parsed results
quoted_string.set_parse_action(remove_quotes)
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def with_attribute(*args, **attr_dict):
"""
Helper to create a validating parse action to be used with start
tags created with :class:`make_xml_tags` or
:class:`make_html_tags`. Use ``with_attribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``with_attribute`` with a series of attribute names and
values. Specify the list of filter attribute names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched case-insensitively.
If just testing for ``class`` (with or without a namespace), use
:class:`with_class`.
To verify that the attribute exists, but without specifying a value,
pass ``with_attribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = make_html_tags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().set_parse_action(with_attribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.search_string(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.search_string(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attr_dict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(
s,
l,
"attribute {!r} has value {!r}, must be {!r}".format(
attrName, tokens[attrName], attrValue
),
)
return pa
with_attribute.ANY_VALUE = object()
def with_class(classname, namespace=""):
"""
Simplified version of :class:`with_attribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this &lt;div&gt; has no class</div>
</div>
'''
div,div_end = make_html_tags("div")
div_grid = div().set_parse_action(with_class("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.search_string(html):
print(grid_header.body)
div_any_type = div().set_parse_action(with_class(with_attribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.search_string(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "{}:class".format(namespace) if namespace else "class"
return with_attribute(**{classattr: classname})
# pre-PEP8 compatibility symbols
replaceWith = replace_with
removeQuotes = remove_quotes
withAttribute = with_attribute
withClass = with_class
matchOnlyAtCol = match_only_at_col


@@ -1,424 +0,0 @@
# common.py
from .core import *
from .helpers import delimited_list, any_open_tag, any_close_tag
from datetime import datetime
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""Here are some common low-level expressions that may be useful in
jump-starting parser development:
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
:class:`scientific notation<sci_real>`)
- common :class:`programming identifiers<identifier>`
- network addresses (:class:`MAC<mac_address>`,
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- ISO8601 :class:`dates<iso8601_date>` and
:class:`datetime<iso8601_datetime>`
- :class:`UUID<uuid>`
- :class:`comma-separated list<comma_separated_list>`
- :class:`url`
Parse actions:
- :class:`convertToInteger`
- :class:`convertToFloat`
- :class:`convertToDate`
- :class:`convertToDatetime`
- :class:`stripHTMLTags`
- :class:`upcaseTokens`
- :class:`downcaseTokens`
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convert_to_integer = token_map(int)
"""
Parse action for converting parsed integers to Python int
"""
convert_to_float = token_map(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = (
Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
)
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = (
Regex(r"[+-]?\d+")
.set_name("signed integer")
.set_parse_action(convert_to_integer)
)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (
signed_integer().set_parse_action(convert_to_float)
+ "/"
+ signed_integer().set_parse_action(convert_to_float)
).set_name("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
mixed_integer = (
fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
).set_name("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.add_parse_action(sum)
real = (
Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
.set_name("real number")
.set_parse_action(convert_to_float)
)
"""expression that parses a floating point number and returns a float"""
sci_real = (
Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
.set_name("real number with scientific notation")
.set_parse_action(convert_to_float)
)
"""expression that parses a floating point number with optional
scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).set_name("number").streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = (
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
.set_name("fnumber")
.set_parse_action(convert_to_float)
)
"""any int or real number, returned as float"""
identifier = Word(identchars, identbodychars).set_name("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
).set_name("IPv4 address")
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
"full IPv6 address"
)
_short_ipv6_address = (
Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ "::"
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
).set_name("short IPv6 address")
_short_ipv6_address.add_condition(
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
ipv6_address = Combine(
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
"IPv6 address"
)
).set_name("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
).set_name("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convert_to_date(fmt: str = "%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.set_parse_action(pyparsing_common.convert_to_date())
print(date_expr.parse_string("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(ss, ll, tt):
try:
return datetime.strptime(tt[0], fmt).date()
except ValueError as ve:
raise ParseException(ss, ll, str(ve))
return cvt_fn
@staticmethod
def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
"""Helper to create a parse action for converting parsed
datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
).set_name("ISO8601 date")
"ISO8601 date (``yyyy-mm-dd``)"
iso8601_datetime = Regex(
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
).set_name("ISO8601 datetime")
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
_html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
@staticmethod
def strip_html_tags(s: str, l: int, tokens: ParseResults):
"""Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
td, td_end = make_html_tags("TD")
table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
print(table_text.parse_string(text).body)
Prints::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transform_string(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, exclude_chars=",")
+ Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
)
)
.streamline()
.set_name("commaItem")
)
comma_separated_list = delimited_list(
Opt(quoted_string.copy() | _commasepitem, default="")
).set_name("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
"""Parse action to convert tokens to upper case."""
downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
"""Parse action to convert tokens to lower case."""
# fmt: off
url = Regex(
# https://mathiasbynens.be/demo/url-regex
# https://gist.github.com/dperini/729294
r"^" +
# protocol identifier (optional)
# short syntax // still required
r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
# user:pass BasicAuth (optional)
r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
r"(?P<host>" +
# IP address exclusion
# private & local networks
r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
# IP address dotted notation octets
# excludes loopback network 0.0.0.0
# excludes reserved space >= 224.0.0.0
# excludes network & broadcast addresses
# (first & last IP address of each class)
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
r"|" +
# host & domain names, may end with dot
# can be replaced by a shortest alternative
# (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
r"(?:" +
r"(?:" +
r"[a-z0-9\u00a1-\uffff]" +
r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
r")?" +
r"[a-z0-9\u00a1-\uffff]\." +
r")+" +
# TLD identifier name, may end with dot
r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
r")" +
# port number (optional)
r"(:(?P<port>\d{2,5}))?" +
# resource path (optional)
r"(?P<path>\/[^?# ]*)?" +
# query string (optional)
r"(\?(?P<query>[^#]*))?" +
# fragment (optional)
r"(#(?P<fragment>\S*))?" +
r"$"
).set_name("url")
# fmt: on
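# illustrative: the regex's named groups become results names, e.g.
#   pyparsing_common.url.parse_string("https://example.com:8080/a?q=1")["host"] -> 'example.com'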
# pre-PEP8 compatibility names
convertToInteger = convert_to_integer
convertToFloat = convert_to_float
convertToDate = convert_to_date
convertToDatetime = convert_to_datetime
stripHTMLTags = strip_html_tags
upcaseTokens = upcase_tokens
downcaseTokens = downcase_tokens
_builtin_exprs = [
v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
]

File diff suppressed because it is too large


@@ -1,642 +0,0 @@
import railroad
import pyparsing
import typing
from typing import (
List,
NamedTuple,
Generic,
TypeVar,
Dict,
Callable,
Set,
Iterable,
)
from jinja2 import Template
from io import StringIO
import inspect
jinja2_template_source = """\
<!DOCTYPE html>
<html>
<head>
{% if not head %}
<style type="text/css">
.railroad-heading {
font-family: monospace;
}
</style>
{% else %}
{{ head | safe }}
{% endif %}
</head>
<body>
{{ body | safe }}
{% for diagram in diagrams %}
<div class="railroad-group">
<h1 class="railroad-heading">{{ diagram.title }}</h1>
<div class="railroad-description">{{ diagram.text }}</div>
<div class="railroad-svg">
{{ diagram.svg }}
</div>
</div>
{% endfor %}
</body>
</html>
"""
template = Template(jinja2_template_source)
# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
NamedDiagram = NamedTuple(
"NamedDiagram",
[("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
)
"""
A simple structure for associating a name with a railroad diagram
"""
T = TypeVar("T")
class EachItem(railroad.Group):
"""
Custom railroad item to compose a:
- Group containing a
- OneOrMore containing a
- Choice of the elements in the Each
with the group label indicating that all must be matched
"""
all_label = "[ALL]"
def __init__(self, *items):
choice_item = railroad.Choice(len(items) - 1, *items)
one_or_more_item = railroad.OneOrMore(item=choice_item)
super().__init__(one_or_more_item, label=self.all_label)
class AnnotatedItem(railroad.Group):
"""
Simple subclass of Group that creates an annotation label
"""
def __init__(self, label: str, item):
super().__init__(item=item, label="[{}]".format(label) if label else label)
class EditablePartial(Generic[T]):
"""
Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
constructed.
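Example (a sketch using a hypothetical ``make_pair`` function)::
def make_pair(a, b):
    return (a, b)
part = EditablePartial.from_call(make_pair, 1, b=2)
part.kwargs["b"] = 3  # edit the pending call
part()  # -> (1, 3)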
"""
# We need this here because the railroad constructors actually transform the data, so can't be called until the
# entire tree is assembled
def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
self.func = func
self.args = args
self.kwargs = kwargs
@classmethod
def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
"""
If you call this function in the same way that you would call the constructor, it will store the arguments
as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
"""
return EditablePartial(func=func, args=list(args), kwargs=kwargs)
@property
def name(self):
return self.kwargs["name"]
def __call__(self) -> T:
"""
Evaluate the partial and return the result
"""
args = self.args.copy()
kwargs = self.kwargs.copy()
# This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
# args=['list', 'of', 'things'])
arg_spec = inspect.getfullargspec(self.func)
if arg_spec.varargs in self.kwargs:
args += kwargs.pop(arg_spec.varargs)
return self.func(*args, **kwargs)
def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
"""
Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
:param kwargs: kwargs to be passed in to the template
"""
data = []
for diagram in diagrams:
if diagram.diagram is None:
continue
io = StringIO()
diagram.diagram.writeSvg(io.write)
title = diagram.name
if diagram.index == 0:
title += " (root)"
data.append({"title": title, "text": "", "svg": io.getvalue()})
return template.render(diagrams=data, **kwargs)
def resolve_partial(partial: "EditablePartial[T]") -> T:
"""
Recursively resolves a collection of Partials into whatever type they are
"""
if isinstance(partial, EditablePartial):
partial.args = resolve_partial(partial.args)
partial.kwargs = resolve_partial(partial.kwargs)
return partial()
elif isinstance(partial, list):
return [resolve_partial(x) for x in partial]
elif isinstance(partial, dict):
return {key: resolve_partial(x) for key, x in partial.items()}
else:
return partial
def to_railroad(
element: pyparsing.ParserElement,
diagram_kwargs: typing.Optional[dict] = None,
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
) -> List[NamedDiagram]:
"""
Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
creation if you want to access the Railroad tree before it is converted to HTML
:param element: base element of the parser being diagrammed
:param diagram_kwargs: kwargs to pass to the Diagram() constructor
:param vertical: int - limit at which number of alternatives should be
shown vertically instead of horizontally
:param show_results_names: bool to indicate whether results name annotations should be
included in the diagram
:param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
surrounding box
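Example (sketch; ``my_grammar`` is a hypothetical ParserElement)::
diagrams = to_railroad(my_grammar)
html = railroad_to_html(diagrams)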
"""
# Convert the whole tree underneath the root
lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
_to_diagram_element(
element,
lookup=lookup,
parent=None,
vertical=vertical,
show_results_names=show_results_names,
show_groups=show_groups,
)
root_id = id(element)
# Convert the root if it hasn't been already
if root_id in lookup:
if not element.customName:
lookup[root_id].name = ""
lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
# Now that we're finished, we can convert from intermediate structures into Railroad elements
diags = list(lookup.diagrams.values())
if len(diags) > 1:
# collapse out duplicate diags with the same name
seen = set()
deduped_diags = []
for d in diags:
# don't extract SkipTo elements, they are uninformative as subdiagrams
if d.name == "...":
continue
if d.name is not None and d.name not in seen:
seen.add(d.name)
deduped_diags.append(d)
resolved = [resolve_partial(partial) for partial in deduped_diags]
else:
# special case - if just one diagram, always display it, even if
# it has no name
resolved = [resolve_partial(partial) for partial in diags]
return sorted(resolved, key=lambda diag: diag.index)
def _should_vertical(
specification: int, exprs: Iterable[pyparsing.ParserElement]
) -> bool:
"""
Returns true if we should return a vertical list of elements
"""
if specification is None:
return False
else:
return len(_visible_exprs(exprs)) >= specification
class ElementState:
"""
State recorded for an individual pyparsing Element
"""
# Note: this should be a dataclass, but we have to support Python 3.5
def __init__(
self,
element: pyparsing.ParserElement,
converted: EditablePartial,
parent: EditablePartial,
number: int,
name: str = None,
parent_index: typing.Optional[int] = None,
):
#: The pyparsing element that this represents
self.element: pyparsing.ParserElement = element
#: The name of the element
self.name: typing.Optional[str] = name
#: The output Railroad element in an unconverted state
self.converted: EditablePartial = converted
#: The parent Railroad element, which we store so that we can extract this if it's duplicated
self.parent: EditablePartial = parent
#: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
self.number: int = number
#: The index of this inside its parent
self.parent_index: typing.Optional[int] = parent_index
#: If true, we should extract this out into a subdiagram
self.extract: bool = False
#: If true, all of this element's children have been filled out
self.complete: bool = False
def mark_for_extraction(
self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
):
"""
Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
:param el_id: id of the element
:param state: element/diagram state tracker
:param name: name to use for this element's text
:param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
root element when we know we're finished
"""
self.extract = True
# Set the name
if not self.name:
if name:
# Allow forcing a custom name
self.name = name
elif self.element.customName:
self.name = self.element.customName
else:
self.name = ""
# Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
# to be added
# Also, if this is just a string literal etc, don't bother extracting it
if force or (self.complete and _worth_extracting(self.element)):
state.extract_into_diagram(el_id)
class ConverterState:
"""
Stores some state that persists between recursions into the element tree
"""
def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
#: A dictionary mapping ParserElements to state relating to them
self._element_diagram_states: Dict[int, ElementState] = {}
#: A dictionary mapping ParserElement IDs to subdiagrams generated from them
self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
#: The index of the next unnamed element
self.unnamed_index: int = 1
#: The index of the next element. This is used for sorting
self.index: int = 0
#: Shared kwargs that are used to customize the construction of diagrams
self.diagram_kwargs: dict = diagram_kwargs or {}
self.extracted_diagram_names: Set[str] = set()
def __setitem__(self, key: int, value: ElementState):
self._element_diagram_states[key] = value
def __getitem__(self, key: int) -> ElementState:
return self._element_diagram_states[key]
def __delitem__(self, key: int):
del self._element_diagram_states[key]
def __contains__(self, key: int):
return key in self._element_diagram_states
def generate_unnamed(self) -> int:
"""
Generate a number used in the name of an otherwise unnamed diagram
"""
self.unnamed_index += 1
return self.unnamed_index
def generate_index(self) -> int:
"""
Generate a number used to index a diagram
"""
self.index += 1
return self.index
def extract_into_diagram(self, el_id: int):
"""
Used when we encounter the same token twice in the same tree. When this
happens, we replace all instances of that token with a terminal, and
create a new subdiagram for the token
"""
position = self[el_id]
# Replace the original definition of this element with a regular block
if position.parent:
ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
if "item" in position.parent.kwargs:
position.parent.kwargs["item"] = ret
elif "items" in position.parent.kwargs:
position.parent.kwargs["items"][position.parent_index] = ret
# If the element we're extracting is a group, skip to its content but keep the title
if position.converted.func == railroad.Group:
content = position.converted.kwargs["item"]
else:
content = position.converted
self.diagrams[el_id] = EditablePartial.from_call(
NamedDiagram,
name=position.name,
diagram=EditablePartial.from_call(
railroad.Diagram, content, **self.diagram_kwargs
),
index=position.number,
)
del self[el_id]
def _worth_extracting(element: pyparsing.ParserElement) -> bool:
"""
Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
themselves have children, then it's complex enough to extract
"""
children = element.recurse()
return any(child.recurse() for child in children)
def _apply_diagram_item_enhancements(fn):
"""
decorator to ensure enhancements to a diagram item (such as results name annotations)
get applied on return from _to_diagram_element (we do this since there are several
returns in _to_diagram_element)
"""
def _inner(
element: pyparsing.ParserElement,
parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
) -> typing.Optional[EditablePartial]:
ret = fn(
element,
parent,
lookup,
vertical,
index,
name_hint,
show_results_names,
show_groups,
)
# apply annotation for results name, if present
if show_results_names and ret is not None:
element_results_name = element.resultsName
if element_results_name:
# add "*" to indicate if this is a "list all results" name
element_results_name += "" if element.modalResults else "*"
ret = EditablePartial.from_call(
railroad.Group, item=ret, label=element_results_name
)
return ret
return _inner
def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
non_diagramming_exprs = (
pyparsing.ParseElementEnhance,
pyparsing.PositionToken,
pyparsing.And._ErrorStop,
)
return [
e
for e in exprs
if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
]
@_apply_diagram_item_enhancements
def _to_diagram_element(
element: pyparsing.ParserElement,
parent: typing.Optional[EditablePartial],
lookup: ConverterState = None,
vertical: int = None,
index: int = 0,
name_hint: str = None,
show_results_names: bool = False,
show_groups: bool = False,
) -> typing.Optional[EditablePartial]:
"""
Recursively converts a PyParsing Element to a railroad Element
:param lookup: The shared converter state that keeps track of useful things
:param index: The index of this element within the parent
:param parent: The parent of this element in the output tree
:param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
do so
:param name_hint: If provided, this will override the generated name
:param show_results_names: bool flag indicating whether to add annotations for results names
:param show_groups: bool flag indicating whether to show groups using bounding box
:returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
"""
exprs = element.recurse()
name = name_hint or element.customName or element.__class__.__name__
# Python's id() is used to provide a unique identifier for elements
el_id = id(element)
element_results_name = element.resultsName
# Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
if not element.customName:
if isinstance(
element,
(
# pyparsing.TokenConverter,
# pyparsing.Forward,
pyparsing.Located,
),
):
# However, if this element has a useful custom name, and its child does not, we can pass it on to the child
if exprs:
if not exprs[0].customName:
propagated_name = name
else:
propagated_name = None
return _to_diagram_element(
element.expr,
parent=parent,
lookup=lookup,
vertical=vertical,
index=index,
name_hint=propagated_name,
show_results_names=show_results_names,
show_groups=show_groups,
)
# If the element isn't worth extracting, we always treat it as the first time we see it
if _worth_extracting(element):
if el_id in lookup:
# If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
# so we have to extract it into a new diagram.
looked_up = lookup[el_id]
looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
return ret
elif el_id in lookup.diagrams:
# If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
# just put in a marker element that refers to the sub-diagram
ret = EditablePartial.from_call(
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
)
return ret
# Recursively convert child elements
# Here we find the most relevant Railroad element for matching pyparsing Element
# We use ``items=[]`` here to hold the place for where the child elements will go once created
if isinstance(element, pyparsing.And):
# detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
# (all will have the same name, and resultsName)
if not exprs:
return None
if len(set((e.name, e.resultsName) for e in exprs)) == 1:
ret = EditablePartial.from_call(
railroad.OneOrMore, item="", repeat=str(len(exprs))
)
elif _should_vertical(vertical, exprs):
ret = EditablePartial.from_call(railroad.Stack, items=[])
else:
ret = EditablePartial.from_call(railroad.Sequence, items=[])
elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
if not exprs:
return None
if _should_vertical(vertical, exprs):
ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
else:
ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
elif isinstance(element, pyparsing.Each):
if not exprs:
return None
ret = EditablePartial.from_call(EachItem, items=[])
elif isinstance(element, pyparsing.NotAny):
ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
elif isinstance(element, pyparsing.FollowedBy):
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
elif isinstance(element, pyparsing.PrecededBy):
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
elif isinstance(element, pyparsing.Group):
if show_groups:
ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
else:
ret = EditablePartial.from_call(railroad.Group, label="", item="")
elif isinstance(element, pyparsing.TokenConverter):
ret = EditablePartial.from_call(
AnnotatedItem, label=type(element).__name__.lower(), item=""
)
elif isinstance(element, pyparsing.Opt):
ret = EditablePartial.from_call(railroad.Optional, item="")
elif isinstance(element, pyparsing.OneOrMore):
ret = EditablePartial.from_call(railroad.OneOrMore, item="")
elif isinstance(element, pyparsing.ZeroOrMore):
ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
elif isinstance(element, pyparsing.Group):
ret = EditablePartial.from_call(
railroad.Group, item=None, label=element_results_name
)
elif isinstance(element, pyparsing.Empty) and not element.customName:
# Skip unnamed "Empty" elements
ret = None
elif len(exprs) > 1:
ret = EditablePartial.from_call(railroad.Sequence, items=[])
elif len(exprs) > 0 and not element_results_name:
ret = EditablePartial.from_call(railroad.Group, item="", label=name)
else:
terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
ret = terminal
if ret is None:
return
# Indicate this element's position in the tree so we can extract it if necessary
lookup[el_id] = ElementState(
element=element,
converted=ret,
parent=parent,
parent_index=index,
number=lookup.generate_index(),
)
if element.customName:
lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
i = 0
for expr in exprs:
# Add a placeholder index in case we have to extract the child before we even add it to the parent
if "items" in ret.kwargs:
ret.kwargs["items"].insert(i, None)
item = _to_diagram_element(
expr,
parent=ret,
lookup=lookup,
vertical=vertical,
index=i,
show_results_names=show_results_names,
show_groups=show_groups,
)
# Some elements don't need to be shown in the diagram
if item is not None:
if "item" in ret.kwargs:
ret.kwargs["item"] = item
elif "items" in ret.kwargs:
# If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
ret.kwargs["items"][i] = item
i += 1
elif "items" in ret.kwargs:
# If we're supposed to skip this element, remove it from the parent
del ret.kwargs["items"][i]
# If all of this item's children are None, skip this item
if ret and (
("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
or ("item" in ret.kwargs and ret.kwargs["item"] is None)
):
ret = EditablePartial.from_call(railroad.Terminal, name)
# Mark this element as "complete", ie it has all of its children
if el_id in lookup:
lookup[el_id].complete = True
if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
lookup.extract_into_diagram(el_id)
if ret is not None:
ret = EditablePartial.from_call(
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
)
return ret


@@ -1,267 +0,0 @@
# exceptions.py
import re
import sys
import typing
from .util import col, line, lineno, _collapse_string_to_ranges
from .unicode import pyparsing_unicode as ppu
class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
pass
_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
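# the regex above grabs a run of up to 16 word characters (drawn from several Unicode
# alphabets) at the failure location; ParseBaseException.__str__ uses it to report what was "found"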
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self,
pstr: str,
loc: int = 0,
msg: typing.Optional[str] = None,
elem=None,
):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parser_element = self.parserElement = elem
self.args = (pstr, loc, msg)
@staticmethod
def explain_exception(exc, depth=16):
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
"""
import inspect
from .core import ParserElement
if depth is None:
depth = sys.getrecursionlimit()
ret = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(" " * (exc.column - 1) + "^")
ret.append("{}: {}".format(type(exc).__name__, exc))
if depth > 0:
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen = set()
for i, ff in enumerate(callers[-depth:]):
frm = ff[0]
f_self = frm.f_locals.get("self", None)
if isinstance(f_self, ParserElement):
if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
continue
if id(f_self) in seen:
continue
seen.add(id(f_self))
self_type = type(f_self)
ret.append(
"{}.{} - {}".format(
self_type.__module__, self_type.__name__, f_self
)
)
elif f_self is not None:
self_type = type(f_self)
ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
else:
code = frm.f_code
if code.co_name in ("wrapper", "<module>"):
continue
ret.append("{}".format(code.co_name))
depth -= 1
if not depth:
break
return "\n".join(ret)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
@property
def line(self) -> str:
"""
Return the line of text where the exception occurred.
"""
return line(self.loc, self.pstr)
@property
def lineno(self) -> int:
"""
Return the 1-based line number of text where the exception occurred.
"""
return lineno(self.loc, self.pstr)
@property
def col(self) -> int:
"""
Return the 1-based column on the line of text where the exception occurred.
"""
return col(self.loc, self.pstr)
@property
def column(self) -> int:
"""
Return the 1-based column on the line of text where the exception occurred.
"""
return col(self.loc, self.pstr)
def __str__(self) -> str:
if self.pstr:
if self.loc >= len(self.pstr):
foundstr = ", found end of text"
else:
# pull out next word at error location
found_match = _exception_word_extractor.match(self.pstr, self.loc)
if found_match is not None:
found = found_match.group(0)
else:
found = self.pstr[self.loc : self.loc + 1]
foundstr = (", found %r" % found).replace(r"\\", "\\")
else:
foundstr = ""
return "{}{} (at char {}), (line:{}, col:{})".format(
self.msg, foundstr, self.loc, self.lineno, self.column
)
def __repr__(self):
return str(self)
def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
"""
Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
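Example (illustrative)::
try:
    Word(nums).parse_string("ABC")
except ParseException as pe:
    print(pe.mark_input_line())  # -> '>!<ABC'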
"""
markerString = marker_string if marker_string is not None else markerString
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join(
(line_str[:line_column], markerString, line_str[line_column:])
)
return line_str.strip()
def explain(self, depth=16) -> str:
"""
Method to translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Example::
expr = pp.Word(pp.nums) * 3
try:
expr.parse_string("123 456 A789")
except pp.ParseException as pe:
print(pe.explain(depth=0))
prints::
123 456 A789
^
ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `set_name` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
Note: pyparsing's default truncation of exception tracebacks may also truncate the
stack of expressions that are displayed in the ``explain`` output. To get the full listing
of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
"""
return self.explain_exception(self, depth)
markInputline = mark_input_line
class ParseException(ParseBaseException):
"""
Exception thrown when a parse expression doesn't match the input string
Example::
try:
Word(nums).set_name("integer").parse_string("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.column))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
class ParseFatalException(ParseBaseException):
"""
User-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately
"""
class ParseSyntaxException(ParseFatalException):
"""
Just like :class:`ParseFatalException`, but thrown internally
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
that parsing is to stop immediately because an unbacktrackable
syntax error has been found.
"""
class RecursiveGrammarException(Exception):
"""
Exception thrown by :class:`ParserElement.validate` if the
grammar could be left-recursive; parser may need to enable
left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
"""
def __init__(self, parseElementList):
self.parseElementTrace = parseElementList
def __str__(self) -> str:
return "RecursiveGrammarException: {}".format(self.parseElementTrace)

File diff suppressed because it is too large


@@ -1,760 +0,0 @@
# results.py
from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
import pprint
from weakref import ref as wkref
from typing import Tuple, Any
str_type: Tuple[type, ...] = (str, bytes)
_generator_type = type((_ for _ in ()))
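# internal helper: pairs a named result value with its offset into the token list,
# so that named results stay aligned when tokens are inserted or deleted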
class _ParseResultsWithOffset:
__slots__ = ["tup"]
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __getstate__(self):
return self.tup
def __setstate__(self, *args):
self.tup = args[0]
class ParseResults:
"""Structured parse results, to provide multiple means of access to
the parsed data:
- as a list (``len(results)``)
- by list index (``results[0], results[1]``, etc.)
- by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
Example::
integer = Word(nums)
date_str = (integer.set_results_name("year") + '/'
+ integer.set_results_name("month") + '/'
+ integer.set_results_name("day"))
# equivalent form:
# date_str = (integer("year") + '/'
# + integer("month") + '/'
# + integer("day"))
# parse_string returns a ParseResults object
result = date_str.parse_string("1999/12/31")
def test(s, fn=repr):
print("{} -> {}".format(s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: '31'
- month: '12'
- year: '1999'
"""
_null_values: Tuple[Any, ...] = (None, [], "", ())
__slots__ = [
"_name",
"_parent",
"_all_names",
"_modal",
"_toklist",
"_tokdict",
"__weakref__",
]
class List(list):
"""
Simple wrapper class to distinguish parsed list results that should be preserved
as actual Python lists, instead of being converted to :class:`ParseResults`:
LBRACK, RBRACK = map(pp.Suppress, "[]")
element = pp.Forward()
item = ppc.integer
element_list = LBRACK + pp.delimited_list(element) + RBRACK
# add parse actions to convert from ParseResults to actual Python collection types
def as_python_list(t):
return pp.ParseResults.List(t.as_list())
element_list.add_parse_action(as_python_list)
element <<= item | element_list
element.run_tests('''
100
[2,3,4]
[[2, 1],3,4]
[(2, 1),3,4]
(2,3,4)
''', post_parse=lambda s, r: (r[0], type(r[0])))
prints:
100
(100, <class 'int'>)
[2,3,4]
([2, 3, 4], <class 'list'>)
[[2, 1],3,4]
([[2, 1], 3, 4], <class 'list'>)
(Used internally by :class:`Group` when `aslist=True`.)
"""
def __new__(cls, contained=None):
if contained is None:
contained = []
if not isinstance(contained, list):
raise TypeError(
"{} may only be constructed with a list,"
" not {}".format(cls.__name__, type(contained).__name__)
)
return list.__new__(cls)
def __new__(cls, toklist=None, name=None, **kwargs):
if isinstance(toklist, ParseResults):
return toklist
self = object.__new__(cls)
self._name = None
self._parent = None
self._all_names = set()
if toklist is None:
self._toklist = []
elif isinstance(toklist, (list, _generator_type)):
self._toklist = (
[toklist[:]]
if isinstance(toklist, ParseResults.List)
else list(toklist)
)
else:
self._toklist = [toklist]
self._tokdict = dict()
return self
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
):
self._modal = modal
if name is not None and name != "":
if isinstance(name, int):
name = str(name)
if not modal:
self._all_names = {name}
self._name = name
if toklist not in self._null_values:
if isinstance(toklist, (str_type, type)):
toklist = [toklist]
if asList:
if isinstance(toklist, ParseResults):
self[name] = _ParseResultsWithOffset(
ParseResults(toklist._toklist), 0
)
else:
self[name] = _ParseResultsWithOffset(
ParseResults(toklist[0]), 0
)
self[name]._name = name
else:
try:
self[name] = toklist[0]
except (KeyError, TypeError, IndexError):
if toklist is not self:
self[name] = toklist
else:
self._name = name
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self._toklist[i]
else:
if i not in self._all_names:
return self._tokdict[i][-1][0]
else:
return ParseResults([v[0] for v in self._tokdict[i]])
def __setitem__(self, k, v, isinstance=isinstance):
if isinstance(v, _ParseResultsWithOffset):
self._tokdict[k] = self._tokdict.get(k, list()) + [v]
sub = v[0]
elif isinstance(k, (int, slice)):
self._toklist[k] = v
sub = v
else:
self._tokdict[k] = self._tokdict.get(k, list()) + [
_ParseResultsWithOffset(v, 0)
]
sub = v
if isinstance(sub, ParseResults):
sub._parent = wkref(self)
def __delitem__(self, i):
if isinstance(i, (int, slice)):
mylen = len(self._toklist)
del self._toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i + 1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name, occurrences in self._tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position - (position > j)
)
else:
del self._tokdict[i]
def __contains__(self, k) -> bool:
return k in self._tokdict
def __len__(self) -> int:
return len(self._toklist)
def __bool__(self) -> bool:
return not not (self._toklist or self._tokdict)
def __iter__(self) -> Iterator:
return iter(self._toklist)
def __reversed__(self) -> Iterator:
return iter(self._toklist[::-1])
def keys(self):
return iter(self._tokdict)
def values(self):
return (self[k] for k in self.keys())
def items(self):
return ((k, self[k]) for k in self.keys())
def haskeys(self) -> bool:
"""
Since ``keys()`` returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self._tokdict)
def pop(self, *args, **kwargs):
"""
Removes and returns item at specified index (default= ``last``).
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
passed no argument or an integer argument, it will use ``list``
semantics and pop tokens from the list of parsed tokens. If passed
a non-integer argument (most likely a string), it will use ``dict``
semantics and pop the corresponding value from any defined results
names. A second default return value argument is supported, just as in
``dict.pop()``.
Example::
numlist = Word(nums)[...]
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
def remove_first(tokens):
tokens.pop(0)
numlist.add_parse_action(remove_first)
print(numlist.parse_string("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + Word(nums)[1, ...]
print(patt.parse_string("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.add_parse_action(remove_LABEL)
print(patt.parse_string("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: 'AAB'
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k, v in kwargs.items():
if k == "default":
args = (args[0], v)
else:
raise TypeError(
"pop() got an unexpected keyword argument {!r}".format(k)
)
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, default_value=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given ``default_value`` or ``None`` if no
``default_value`` is specified.
Similar to ``dict.get()``.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parse_string("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return default_value
def insert(self, index, ins_string):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to ``list.insert()``.
Example::
numlist = Word(nums)[...]
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
numlist.add_parse_action(insert_locn)
print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
"""
self._toklist.insert(index, ins_string)
# fixup indices in token dictionary
for name, occurrences in self._tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position + (position > index)
)
def append(self, item):
"""
Add single element to end of ``ParseResults`` list of elements.
Example::
numlist = Word(nums)[...]
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
numlist.add_parse_action(append_sum)
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
"""
self._toklist.append(item)
def extend(self, itemseq):
"""
Add sequence of elements to end of ``ParseResults`` list of elements.
Example::
patt = Word(alphas)[1, ...]
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
patt.add_parse_action(make_palindrome)
print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self.__iadd__(itemseq)
else:
self._toklist.extend(itemseq)
def clear(self):
"""
Clear all elements and results names.
"""
del self._toklist[:]
self._tokdict.clear()
def __getattr__(self, name):
try:
return self[name]
except KeyError:
if name.startswith("__"):
raise AttributeError(name)
return ""
def __add__(self, other) -> "ParseResults":
ret = self.copy()
ret += other
return ret
def __iadd__(self, other) -> "ParseResults":
if other._tokdict:
offset = len(self._toklist)
addoffset = lambda a: offset if a < 0 else a + offset
otheritems = other._tokdict.items()
otherdictitems = [
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
for k, vlist in otheritems
for v in vlist
]
for k, v in otherdictitems:
self[k] = v
if isinstance(v[0], ParseResults):
v[0]._parent = wkref(self)
self._toklist += other._toklist
self._all_names |= other._all_names
return self
def __radd__(self, other) -> "ParseResults":
if isinstance(other, int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__(self) -> str:
return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
def __str__(self) -> str:
return (
"["
+ ", ".join(
[
str(i) if isinstance(i, ParseResults) else repr(i)
for i in self._toklist
]
)
+ "]"
)
def _asStringList(self, sep=""):
out = []
for item in self._toklist:
if out and sep:
out.append(sep)
if isinstance(item, ParseResults):
out += item._asStringList()
else:
out.append(str(item))
return out
def as_list(self) -> list:
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = Word(alphas)[1, ...]
result = patt.parse_string("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use as_list() to create an actual list
result_list = result.as_list()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [
res.as_list() if isinstance(res, ParseResults) else res
for res in self._toklist
]
def as_dict(self) -> dict:
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parse_string('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.as_dict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
def to_item(obj):
if isinstance(obj, ParseResults):
return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
else:
return obj
return dict((k, to_item(v)) for k, v in self.items())
def copy(self) -> "ParseResults":
"""
Returns a new copy of a :class:`ParseResults` object.
"""
ret = ParseResults(self._toklist)
ret._tokdict = self._tokdict.copy()
ret._parent = self._parent
ret._all_names |= self._all_names
ret._name = self._name
return ret
def get_name(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = user_data[1, ...]
result = user_info.parse_string("22 111-22-3333 #221B")
for item in result:
print(item.get_name(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self._name:
return self._name
elif self._parent:
par = self._parent()
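# scan the parent's named results for the entry that is this very object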
def find_in_parent(sub):
return next(
(
k
for k, vlist in par._tokdict.items()
for v, loc in vlist
if sub is v
),
None,
)
return find_in_parent(self) if par else None
elif (
len(self) == 1
and len(self._tokdict) == 1
and next(iter(self._tokdict.values()))[0][1] in (0, -1)
):
return next(iter(self._tokdict.keys()))
else:
return None
def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
"""
Diagnostic method for listing out the contents of
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
that this string can be embedded in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parse_string('1999/12/31')
print(result.dump())
prints::
['1999', '/', '12', '/', '31']
- day: '31'
- month: '12'
- year: '1999'
"""
out = []
NL = "\n"
out.append(indent + str(self.as_list()) if include_list else "")
if full:
if self.haskeys():
items = sorted((str(k), v) for k, v in self.items())
for k, v in items:
if out:
out.append(NL)
out.append("{}{}- {}: ".format(indent, (" " * _depth), k))
if isinstance(v, ParseResults):
if v:
out.append(
v.dump(
indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1,
)
)
else:
out.append(str(v))
else:
out.append(repr(v))
if any(isinstance(vv, ParseResults) for vv in self):
v = self
for i, vv in enumerate(v):
if isinstance(vv, ParseResults):
out.append(
"\n{}{}[{}]:\n{}{}{}".format(
indent,
(" " * (_depth)),
i,
indent,
(" " * (_depth + 1)),
vv.dump(
indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1,
),
)
)
else:
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (_depth)),
i,
indent,
(" " * (_depth + 1)),
str(vv),
)
)
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
Accepts additional positional or keyword args as defined for
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimited_list(term)))
result = func.parse_string("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.as_list(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return (
self._toklist,
(
self._tokdict.copy(),
self._parent is not None and self._parent() or None,
self._all_names,
self._name,
),
)
def __setstate__(self, state):
self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
self._all_names = set(inAccumNames)
if par is not None:
self._parent = wkref(par)
else:
self._parent = None
def __getnewargs__(self):
return self._toklist, self._name
def __dir__(self):
return dir(type(self)) + list(self.keys())
@classmethod
def from_dict(cls, other, name=None) -> "ParseResults":
"""
Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
name-value relations as results names. If an optional ``name`` argument is
given, a nested ``ParseResults`` will be returned.
"""
def is_iterable(obj):
try:
iter(obj)
except Exception:
return False
else:
return not isinstance(obj, str_type)
ret = cls([])
for k, v in other.items():
if isinstance(v, Mapping):
ret += cls.from_dict(v, name=k)
else:
ret += cls([v], name=k, asList=is_iterable(v))
if name is not None:
ret = cls([ret], name=name)
return ret
asList = as_list
asDict = as_dict
getName = get_name
MutableMapping.register(ParseResults)
MutableSequence.register(ParseResults)
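A minimal sketch of the list/dict duality the methods above provide, assuming an installed pyparsing 3.x exposes the same API:
from pyparsing import ParseResults, Word, nums

integer = Word(nums)
date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
result = date_str.parse_string("1999/12/31")

print(result.as_list())           # -> ['1999', '/', '12', '/', '31']
print(result.as_dict())           # -> {'year': '1999', 'month': '12', 'day': '31'}
print(result.get("hour", "n/a"))  # -> 'n/a' (dict-style default lookup)

# from_dict() goes the other way, rebuilding named results from a plain mapping
rebuilt = ParseResults.from_dict({"year": "1999", "month": "12"})
print(rebuilt.as_dict())          # -> {'year': '1999', 'month': '12'}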
@ -1,331 +0,0 @@
# testing.py
from contextlib import contextmanager
import typing
from .core import (
ParserElement,
ParseException,
Keyword,
__diag__,
__compat__,
)
class pyparsing_test:
"""
namespace class for classes useful in writing unit tests
"""
class reset_pyparsing_context:
"""
Context manager to be used when writing unit tests that modify pyparsing config values:
- packrat parsing
- bounded recursion parsing
- default whitespace characters.
- default keyword characters
- literal string auto-conversion class
- __diag__ settings
Example::
with reset_pyparsing_context():
# test that literals used to construct a grammar are automatically suppressed
ParserElement.inlineLiteralsUsing(Suppress)
term = Word(alphas) | Word(nums)
group = Group('(' + term[...] + ')')
# assert that the '()' characters are not included in the parsed tokens
self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
# after exiting context manager, literals are converted to Literal expressions again
"""
def __init__(self):
self._save_context = {}
def save(self):
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
self._save_context[
"literal_string_class"
] = ParserElement._literalStringClass
self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
if ParserElement._packratEnabled:
self._save_context[
"packrat_cache_size"
] = ParserElement.packrat_cache.size
else:
self._save_context["packrat_cache_size"] = None
self._save_context["packrat_parse"] = ParserElement._parse
self._save_context[
"recursion_enabled"
] = ParserElement._left_recursion_enabled
self._save_context["__diag__"] = {
name: getattr(__diag__, name) for name in __diag__._all_names
}
self._save_context["__compat__"] = {
"collect_all_And_tokens": __compat__.collect_all_And_tokens
}
return self
def restore(self):
# reset pyparsing global state
if (
ParserElement.DEFAULT_WHITE_CHARS
!= self._save_context["default_whitespace"]
):
ParserElement.set_default_whitespace_chars(
self._save_context["default_whitespace"]
)
ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
ParserElement.inlineLiteralsUsing(
self._save_context["literal_string_class"]
)
for name, value in self._save_context["__diag__"].items():
(__diag__.enable if value else __diag__.disable)(name)
ParserElement._packratEnabled = False
if self._save_context["packrat_enabled"]:
ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
else:
ParserElement._parse = self._save_context["packrat_parse"]
ParserElement._left_recursion_enabled = self._save_context[
"recursion_enabled"
]
__compat__.collect_all_And_tokens = self._save_context["__compat__"]["collect_all_And_tokens"]
return self
def copy(self):
ret = type(self)()
ret._save_context.update(self._save_context)
return ret
def __enter__(self):
return self.save()
def __exit__(self, *args):
self.restore()
class TestParseResultsAsserts:
"""
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
"""
def assertParseResultsEquals(
self, result, expected_list=None, expected_dict=None, msg=None
):
"""
Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
and compare any defined results names with an optional ``expected_dict``.
"""
if expected_list is not None:
self.assertEqual(expected_list, result.as_list(), msg=msg)
if expected_dict is not None:
self.assertEqual(expected_dict, result.as_dict(), msg=msg)
def assertParseAndCheckList(
self, expr, test_string, expected_list, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
"""
result = expr.parse_string(test_string, parse_all=True)
if verbose:
print(result.dump())
else:
print(result.as_list())
self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
def assertParseAndCheckDict(
self, expr, test_string, expected_dict, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
"""
result = expr.parse_string(test_string, parse_all=True)
if verbose:
print(result.dump())
else:
print(result.as_list())
self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
def assertRunTestResults(
self, run_tests_report, expected_parse_results=None, msg=None
):
"""
Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
Finally, asserts that the overall ``runTests()`` success value is ``True``.
:param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
:param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
"""
run_test_success, run_test_results = run_tests_report
if expected_parse_results is not None:
merged = [
(*rpt, expected)
for rpt, expected in zip(run_test_results, expected_parse_results)
]
for test_string, result, expected in merged:
# expected should be a tuple containing a list and/or a dict or an exception,
# and optional failure message string
# an empty tuple will skip any result validation
fail_msg = next(
(exp for exp in expected if isinstance(exp, str)), None
)
expected_exception = next(
(
exp
for exp in expected
if isinstance(exp, type) and issubclass(exp, Exception)
),
None,
)
if expected_exception is not None:
with self.assertRaises(
expected_exception=expected_exception, msg=fail_msg or msg
):
if isinstance(result, Exception):
raise result
else:
expected_list = next(
(exp for exp in expected if isinstance(exp, list)), None
)
expected_dict = next(
(exp for exp in expected if isinstance(exp, dict)), None
)
if (expected_list, expected_dict) != (None, None):
self.assertParseResultsEquals(
result,
expected_list=expected_list,
expected_dict=expected_dict,
msg=fail_msg or msg,
)
else:
# warning here maybe?
print("no validation for {!r}".format(test_string))
# do this last, in case some specific test results can be reported instead
self.assertTrue(
run_test_success, msg=msg if msg is not None else "failed runTests"
)
@contextmanager
def assertRaisesParseException(self, exc_type=ParseException, msg=None):
with self.assertRaises(exc_type, msg=msg):
yield
@staticmethod
def with_line_numbers(
s: str,
start_line: typing.Optional[int] = None,
end_line: typing.Optional[int] = None,
expand_tabs: bool = True,
eol_mark: str = "|",
mark_spaces: typing.Optional[str] = None,
mark_control: typing.Optional[str] = None,
) -> str:
"""
Helpful method for debugging a parser - prints a string with line and column numbers.
(Line and column numbers are 1-based.)
:param s: str - string to be printed with line and column numbers
:param start_line: int - (optional) starting line number in s to print (default=1)
:param end_line: int - (optional) ending line number in s to print (default=len(s))
:param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
:param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
:param mark_spaces: str - (optional) special character to display in place of spaces
:param mark_control: str - (optional) convert non-printing control characters to a placeholding
character; valid values:
- "unicode" - replaces control chars with Unicode symbols, such as "" and ""
- any single character string - replace control characters with given string
- None (default) - string is displayed as-is
:return: str - input string with leading line numbers and column number headers
"""
if expand_tabs:
s = s.expandtabs()
if mark_control is not None:
if mark_control == "unicode":
tbl = str.maketrans(
{c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
| {127: 0x2421}
)
eol_mark = ""
else:
tbl = str.maketrans(
{c: mark_control for c in list(range(0, 32)) + [127]}
)
s = s.translate(tbl)
if mark_spaces is not None and mark_spaces != " ":
if mark_spaces == "unicode":
tbl = str.maketrans({9: 0x2409, 32: 0x2423})
s = s.translate(tbl)
else:
s = s.replace(" ", mark_spaces)
if start_line is None:
start_line = 1
if end_line is None:
end_line = len(s)
end_line = min(end_line, len(s))
start_line = min(max(1, start_line), end_line)
if mark_control != "unicode":
s_lines = s.splitlines()[start_line - 1 : end_line]
else:
s_lines = [line + "" for line in s.split("")[start_line - 1 : end_line]]
if not s_lines:
return ""
lineno_width = len(str(end_line))
max_line_len = max(len(line) for line in s_lines)
lead = " " * (lineno_width + 1)
if max_line_len >= 99:
header0 = (
lead
+ "".join(
"{}{}".format(" " * 99, (i + 1) % 100)
for i in range(max(max_line_len // 100, 1))
)
+ "\n"
)
else:
header0 = ""
header1 = (
header0
+ lead
+ "".join(
" {}".format((i + 1) % 10)
for i in range(-(-max_line_len // 10))
)
+ "\n"
)
header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
return (
header1
+ header2
+ "\n".join(
"{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
for i, line in enumerate(s_lines, start=start_line)
)
+ "\n"
)
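A short sketch of the two removed helpers in use, assuming an installed pyparsing 3.x provides the same pyparsing.testing module:
from pyparsing import ParserElement, Suppress, Word, alphas
from pyparsing.testing import pyparsing_test as ppt

# print a sample string with 1-based line and column rulers
print(ppt.with_line_numbers("abc def\nghi jkl", eol_mark="|"))

# changes to global parser state are rolled back when the block exits
with ppt.reset_pyparsing_context():
    ParserElement.inlineLiteralsUsing(Suppress)
    # grammar experiments inside this block do not leak into other tests
    print(Word(alphas).parse_string("abc"))  # -> ['abc']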
@ -1,352 +0,0 @@
# unicode.py
import sys
from itertools import filterfalse
from typing import List, Tuple, Union
class _lazyclassproperty:
def __init__(self, fn):
self.fn = fn
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
def __get__(self, obj, cls):
if cls is None:
cls = type(obj)
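# give each class its own cache dict; the MRO check avoids reusing one inherited from a base class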
if not hasattr(cls, "_intern") or any(
cls._intern is getattr(superclass, "_intern", [])
for superclass in cls.__mro__[1:]
):
cls._intern = {}
attrname = self.fn.__name__
if attrname not in cls._intern:
cls._intern[attrname] = self.fn(cls)
return cls._intern[attrname]
UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
class unicode_set:
"""
A set of Unicode characters, for language-specific strings for
``alphas``, ``nums``, ``alphanums``, and ``printables``.
A unicode_set is defined by a list of ranges in the Unicode character
set, in a class attribute ``_ranges``. Ranges can be specified using
2-tuples or a 1-tuple, such as::
_ranges = [
(0x0020, 0x007e),
(0x00a0, 0x00ff),
(0x0100,),
]
Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
A unicode set can also be defined using multiple inheritance of other unicode sets::
class CJK(Chinese, Japanese, Korean):
pass
"""
_ranges: UnicodeRangeList = []
@_lazyclassproperty
def _chars_for_ranges(cls):
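# merge the ranges contributed by every unicode_set in the class's MRO (multiple inheritance unions the sets)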
ret = []
for cc in cls.__mro__:
if cc is unicode_set:
break
for rr in getattr(cc, "_ranges", ()):
ret.extend(range(rr[0], rr[-1] + 1))
return [chr(c) for c in sorted(set(ret))]
@_lazyclassproperty
def printables(cls):
"all non-whitespace characters in this range"
return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
@_lazyclassproperty
def alphas(cls):
"all alphabetic characters in this range"
return "".join(filter(str.isalpha, cls._chars_for_ranges))
@_lazyclassproperty
def nums(cls):
"all numeric digit characters in this range"
return "".join(filter(str.isdigit, cls._chars_for_ranges))
@_lazyclassproperty
def alphanums(cls):
"all alphanumeric characters in this range"
return cls.alphas + cls.nums
@_lazyclassproperty
def identchars(cls):
"all characters in this range that are valid identifier characters, plus underscore '_'"
return "".join(
sorted(
set(
"".join(filter(str.isidentifier, cls._chars_for_ranges))
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
+ "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
+ "_"
)
)
)
@_lazyclassproperty
def identbodychars(cls):
"""
all characters in this range that are valid identifier body characters,
plus the digits 0-9
"""
return "".join(
sorted(
set(
cls.identchars
+ "0123456789"
+ "".join(
[c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
)
)
)
)
class pyparsing_unicode(unicode_set):
"""
A namespace class for defining common language unicode_sets.
"""
# fmt: off
# define ranges in language character sets
_ranges: UnicodeRangeList = [
(0x0020, sys.maxunicode),
]
class BasicMultilingualPlane(unicode_set):
"Unicode set for the Basic Multilingual Plane"
_ranges: UnicodeRangeList = [
(0x0020, 0xFFFF),
]
class Latin1(unicode_set):
"Unicode set for Latin-1 Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0020, 0x007E),
(0x00A0, 0x00FF),
]
class LatinA(unicode_set):
"Unicode set for Latin-A Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0100, 0x017F),
]
class LatinB(unicode_set):
"Unicode set for Latin-B Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0180, 0x024F),
]
class Greek(unicode_set):
"Unicode set for Greek Unicode Character Ranges"
_ranges: UnicodeRangeList = [
(0x0342, 0x0345),
(0x0370, 0x0377),
(0x037A, 0x037F),
(0x0384, 0x038A),
(0x038C,),
(0x038E, 0x03A1),
(0x03A3, 0x03E1),
(0x03F0, 0x03FF),
(0x1D26, 0x1D2A),
(0x1D5E,),
(0x1D60,),
(0x1D66, 0x1D6A),
(0x1F00, 0x1F15),
(0x1F18, 0x1F1D),
(0x1F20, 0x1F45),
(0x1F48, 0x1F4D),
(0x1F50, 0x1F57),
(0x1F59,),
(0x1F5B,),
(0x1F5D,),
(0x1F5F, 0x1F7D),
(0x1F80, 0x1FB4),
(0x1FB6, 0x1FC4),
(0x1FC6, 0x1FD3),
(0x1FD6, 0x1FDB),
(0x1FDD, 0x1FEF),
(0x1FF2, 0x1FF4),
(0x1FF6, 0x1FFE),
(0x2129,),
(0x2719, 0x271A),
(0xAB65,),
(0x10140, 0x1018D),
(0x101A0,),
(0x1D200, 0x1D245),
(0x1F7A1, 0x1F7A7),
]
class Cyrillic(unicode_set):
"Unicode set for Cyrillic Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0400, 0x052F),
(0x1C80, 0x1C88),
(0x1D2B,),
(0x1D78,),
(0x2DE0, 0x2DFF),
(0xA640, 0xA672),
(0xA674, 0xA69F),
(0xFE2E, 0xFE2F),
]
class Chinese(unicode_set):
"Unicode set for Chinese Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x2E80, 0x2E99),
(0x2E9B, 0x2EF3),
(0x31C0, 0x31E3),
(0x3400, 0x4DB5),
(0x4E00, 0x9FEF),
(0xA700, 0xA707),
(0xF900, 0xFA6D),
(0xFA70, 0xFAD9),
(0x16FE2, 0x16FE3),
(0x1F210, 0x1F212),
(0x1F214, 0x1F23B),
(0x1F240, 0x1F248),
(0x20000, 0x2A6D6),
(0x2A700, 0x2B734),
(0x2B740, 0x2B81D),
(0x2B820, 0x2CEA1),
(0x2CEB0, 0x2EBE0),
(0x2F800, 0x2FA1D),
]
class Japanese(unicode_set):
"Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
_ranges: UnicodeRangeList = []
class Kanji(unicode_set):
"Unicode set for Kanji Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x4E00, 0x9FBF),
(0x3000, 0x303F),
]
class Hiragana(unicode_set):
"Unicode set for Hiragana Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x3041, 0x3096),
(0x3099, 0x30A0),
(0x30FC,),
(0xFF70,),
(0x1B001,),
(0x1B150, 0x1B152),
(0x1F200,),
]
class Katakana(unicode_set):
"Unicode set for Katakana Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x3099, 0x309C),
(0x30A0, 0x30FF),
(0x31F0, 0x31FF),
(0x32D0, 0x32FE),
(0xFF65, 0xFF9F),
(0x1B000,),
(0x1B164, 0x1B167),
(0x1F201, 0x1F202),
(0x1F213,),
]
class Hangul(unicode_set):
"Unicode set for Hangul (Korean) Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x1100, 0x11FF),
(0x302E, 0x302F),
(0x3131, 0x318E),
(0x3200, 0x321C),
(0x3260, 0x327B),
(0x327E,),
(0xA960, 0xA97C),
(0xAC00, 0xD7A3),
(0xD7B0, 0xD7C6),
(0xD7CB, 0xD7FB),
(0xFFA0, 0xFFBE),
(0xFFC2, 0xFFC7),
(0xFFCA, 0xFFCF),
(0xFFD2, 0xFFD7),
(0xFFDA, 0xFFDC),
]
Korean = Hangul
class CJK(Chinese, Japanese, Hangul):
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
class Thai(unicode_set):
"Unicode set for Thai Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0E01, 0x0E3A),
(0x0E3F, 0x0E5B)
]
class Arabic(unicode_set):
"Unicode set for Arabic Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0600, 0x061B),
(0x061E, 0x06FF),
(0x0700, 0x077F),
]
class Hebrew(unicode_set):
"Unicode set for Hebrew Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0591, 0x05C7),
(0x05D0, 0x05EA),
(0x05EF, 0x05F4),
(0xFB1D, 0xFB36),
(0xFB38, 0xFB3C),
(0xFB3E,),
(0xFB40, 0xFB41),
(0xFB43, 0xFB44),
(0xFB46, 0xFB4F),
]
class Devanagari(unicode_set):
"Unicode set for Devanagari Unicode Character Range"
_ranges: UnicodeRangeList = [
(0x0900, 0x097F),
(0xA8E0, 0xA8FF)
]
# fmt: on
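# Japanese is a union of its three nested scripts; filled in here because the nested classes must exist first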
pyparsing_unicode.Japanese._ranges = (
pyparsing_unicode.Japanese.Kanji._ranges
+ pyparsing_unicode.Japanese.Hiragana._ranges
+ pyparsing_unicode.Japanese.Katakana._ranges
)
pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
# add language identifiers using language Unicode
pyparsing_unicode.العربية = pyparsing_unicode.Arabic
pyparsing_unicode.中文 = pyparsing_unicode.Chinese
pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
pyparsing_unicode.한국어 = pyparsing_unicode.Korean
pyparsing_unicode.ไทย = pyparsing_unicode.Thai
pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
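A brief sketch of how these sets are consumed, assuming an installed pyparsing 3.x exposes the same pyparsing_unicode namespace:
from pyparsing import Word, pyparsing_unicode as ppu

greek_word = Word(ppu.Greek.alphas)
print(greek_word.parse_string("Ελληνικά")[0])  # -> 'Ελληνικά'

# CJK merges the Chinese, Japanese, and Hangul ranges via multiple inheritance
cjk_word = Word(ppu.CJK.printables)
print(cjk_word.parse_string("中文")[0])         # -> '中文'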
@ -1,235 +0,0 @@
# util.py
import warnings
import types
import collections
import itertools
from functools import lru_cache
from typing import List, Union, Iterable
_bslash = chr(92)
class __config_flags:
"""Internal class for defining compatibility and debugging flags"""
_all_names: List[str] = []
_fixed_names: List[str] = []
_type_desc = "configuration"
@classmethod
def _set(cls, dname, value):
if dname in cls._fixed_names:
warnings.warn(
"{}.{} {} is {} and cannot be overridden".format(
cls.__name__,
dname,
cls._type_desc,
str(getattr(cls, dname)).upper(),
)
)
return
if dname in cls._all_names:
setattr(cls, dname, value)
else:
raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
enable = classmethod(lambda cls, name: cls._set(name, True))
disable = classmethod(lambda cls, name: cls._set(name, False))
@lru_cache(maxsize=128)
def col(loc: int, strg: str) -> int:
"""
Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See
:class:`ParserElement.parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
@lru_cache(maxsize=128)
def lineno(loc: int, strg: str) -> int:
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note - the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`ParserElement.parseString`
for more information on parsing strings containing ``<TAB>`` s, and
suggested methods to maintain a consistent view of the parsed string, the
parse location, and line and column positions within the parsed string.
"""
return strg.count("\n", 0, loc) + 1
@lru_cache(maxsize=128)
def line(loc: int, strg: str) -> str:
"""
Returns the line of text containing loc within a string, counting newlines as line separators.
"""
last_cr = strg.rfind("\n", 0, loc)
next_cr = strg.find("\n", loc)
return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
class _UnboundedCache:
def __init__(self):
cache = {}
cache_get = cache.get
self.not_in_cache = not_in_cache = object()
def get(_, key):
return cache_get(key, not_in_cache)
def set_(_, key, value):
cache[key] = value
def clear(_):
cache.clear()
self.size = None
self.get = types.MethodType(get, self)
self.set = types.MethodType(set_, self)
self.clear = types.MethodType(clear, self)
class _FifoCache:
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = collections.OrderedDict()
cache_get = cache.get
def get(_, key):
return cache_get(key, not_in_cache)
def set_(_, key, value):
cache[key] = value
while len(cache) > size:
cache.popitem(last=False)
def clear(_):
cache.clear()
self.size = size
self.get = types.MethodType(get, self)
self.set = types.MethodType(set_, self)
self.clear = types.MethodType(clear, self)
class LRUMemo:
"""
A memoizing mapping that retains `capacity` deleted items
The memo tracks retained items by their access order; once `capacity` items
are retained, the least recently used item is discarded.
"""
def __init__(self, capacity):
self._capacity = capacity
self._active = {}
self._memory = collections.OrderedDict()
def __getitem__(self, key):
try:
return self._active[key]
except KeyError:
self._memory.move_to_end(key)
return self._memory[key]
def __setitem__(self, key, value):
self._memory.pop(key, None)
self._active[key] = value
def __delitem__(self, key):
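# "deleting" an active entry retires it into the bounded memory instead of discarding it outright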
try:
value = self._active.pop(key)
except KeyError:
pass
else:
while len(self._memory) >= self._capacity:
self._memory.popitem(last=False)
self._memory[key] = value
def clear(self):
self._active.clear()
self._memory.clear()
class UnboundedMemo(dict):
"""
A memoizing mapping that retains all deleted items
"""
def __delitem__(self, key):
pass
def _escape_regex_range_chars(s: str) -> str:
# escape these chars: ^-[]
for c in r"\^-[]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return str(s)
def _collapse_string_to_ranges(
s: Union[str, Iterable[str]], re_escape: bool = True
) -> str:
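# is_consecutive() serves as an itertools.groupby key: it returns the same counter value
# for characters whose code points run consecutively, so each group is one collapsible range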
def is_consecutive(c):
c_int = ord(c)
is_consecutive.prev, prev = c_int, is_consecutive.prev
if c_int - prev > 1:
is_consecutive.value = next(is_consecutive.counter)
return is_consecutive.value
is_consecutive.prev = 0
is_consecutive.counter = itertools.count()
is_consecutive.value = -1
def escape_re_range_char(c):
return "\\" + c if c in r"\^-][" else c
def no_escape_re_range_char(c):
return c
if not re_escape:
escape_re_range_char = no_escape_re_range_char
ret = []
s = "".join(sorted(set(s)))
if len(s) > 3:
for _, chars in itertools.groupby(s, key=is_consecutive):
first = last = next(chars)
last = collections.deque(
itertools.chain(iter([last]), chars), maxlen=1
).pop()
if first == last:
ret.append(escape_re_range_char(first))
else:
sep = "" if ord(last) == ord(first) + 1 else "-"
ret.append(
"{}{}{}".format(
escape_re_range_char(first), sep, escape_re_range_char(last)
)
)
else:
ret = [escape_re_range_char(c) for c in s]
return "".join(ret)
def _flatten(ll: list) -> list:
ret = []
for i in ll:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
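The helpers above are easiest to see with concrete values; a small sketch using the names defined in this module:
sample = "abc\ndef"
loc = sample.find("e")        # offset 5: second line, second column
print(lineno(loc, sample))    # -> 2
print(col(loc, sample))       # -> 2
print(line(loc, sample))      # -> 'def'

# runs of consecutive characters collapse into regex range notation
print(_collapse_string_to_ranges("0123456789abcdef"))  # -> '0-9a-f'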
File diff suppressed because it is too large
@ -1,8 +1,11 @@
-packaging==21.3
-pyparsing==3.0.9
-appdirs==1.4.3
+packaging==23.0
+
+platformdirs==2.6.2
+# required for platformdirs on Python < 3.8
+typing_extensions==4.4.0
+
 jaraco.text==3.7.0
 # required for jaraco.text on older Pythons
-importlib_resources==5.4.0
+importlib_resources==5.10.2
 # required for importlib_resources on older Pythons
 zipp==3.7.0
Some files were not shown because too many files have changed in this diff