Merge branch 'feature/AddTmdbCards' into dev

@@ -19,6 +19,9 @@
* Add to config/media-process/File Handling, "Rename TBA" and "Rename any"
* Add config to change media process log message if there is no media to process
* Change view-show text "invalid timeformat" to "time unknown"
* Add menu Shows/"TMDB Cards"
* Add a person's available socials (YouTube, LinkedIn, Reddit, Fansite, TikTok, Wikidata)
* Change to use TVDb genres on view-show if config/General/Interface/"Enable IMDb info" is disabled


### 3.28.0 (2023-04-12 13:05:00 UTC)

@@ -17,6 +17,7 @@ Libs with customisations...
/lib/hachoir_parser/guess.py
/lib/hachoir_parser/misc/torrent.py
/lib/imdbpie
/lib/language_data/names.py
/lib/lockfile/mkdirlockfile.py
/lib/rtorrent
/lib/scandir/scandir.py

@@ -1312,6 +1312,9 @@ div.formpaginate{
width:480px;
margin-top:0
}

#addShowForm #nameToSearch.select2{
width:428px;
}

#addShowForm #nameToSearch.wide{
width:591px;
}

@@ -1439,12 +1442,16 @@ home_browseShows.tmpl
padding-top:2px
}

.show-card-inner .rating{
margin-right:3px;
}

.show-card-inner .heart.icon-glyph{
display:inline-block;
float:none;
height:15px;
width:15px;
margin:0 -3px -2px 2px
margin:0 -3px -2px -1px
}

.show-card-inner p, .show-card-inner i{

@@ -3786,6 +3793,13 @@ option.flag{
background-position:10px 50%
}

#select2-infosrc-lang-select-container .flag,
#select2-infosrc-lang-select-results .flag{
padding-left:25px;
background-repeat:no-repeat;
background-position:0 50%
}

/* Anime section for editShow */
.anigrouplists-wrapper{
height:auto;

BIN  gui/slick/images/_placeholder16.png (new file)  Size: 227 B
BIN  gui/slick/images/flags/hy.png (new file)  Size: 212 B
BIN  gui/slick/images/flags/ka.png (new file)  Size: 287 B
BIN  gui/slick/images/flags/nb.png (new file)  Size: 397 B
BIN  gui/slick/images/flags/nn.png (new file)  Size: 397 B
BIN  gui/slick/images/flags/sq.png (new file)  Size: 324 B
BIN  Before: 303 B
BIN  Before: 545 B  After: 303 B
BIN  gui/slick/images/linkedin16.png (new file)  Size: 399 B
BIN  gui/slick/images/reddit16.png (new file)  Size: 630 B
BIN  gui/slick/images/tiktok16.png (new file)  Size: 622 B
BIN  gui/slick/images/wikidata16.png (new file)  Size: 125 B
BIN  gui/slick/images/youtube16.png (new file)  Size: 214 B

@@ -77,7 +77,7 @@ def param(visible=True, rid=None, cache_person=None, cache_char=None, person=Non
#elif $PersonGenders.male == $cur_person.gender#
#set $gender = 'himself'
#end if#
#set $name = ($cur_person.name, $gender)[$rc_clean.sub('', $cur_person.name.lower()) == $rc_clean.sub('', $character.name.lower())]
#set $name = ($cur_person.name, $gender)[$rc_clean.sub('', $cur_person.name.lower()) == $rc_clean.sub('', ($character.name or 'unknown name').lower())]
<a href="$sbRoot/home/person?$param(person=$cur_person)">$name</a>#if 2 <= $num_people and $cur_enum + 1 == $num_people# and #elif 2 < $num_people and $cur_enum < $num_people#<span>, </span>#end if#
#end for
</div>

@@ -176,6 +176,9 @@
#if not $genres_done and $show_obj.genre
#for $genre in $show_obj.genre.split('|')
#set $genres_done = True
#if $TVINFO_TVDB == $show_obj.tvid
#set $genre = '<a href="%s" target="_blank" title="View other popular %s shows on thetvdb.com" class="addQTip">%s</a>' % (anon_url('https://thetvdb.com/genres/', $genre), $genre, $genre)
#end if
<span class="label">$genre</span>
#end for#
#end if

@@ -28,6 +28,24 @@
<script type="text/javascript" src="$sbRoot/js/qualityChooser.js?v=$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/editShow.js?v=$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/livepanel.js?v=$sbPID"></script>
<script src="$sbRoot/js/lib/select2.full.min.js"></script>
<link href="$sbRoot/css/lib/select2.css" rel="stylesheet">

<style>
.select2-container{height:32px; font-size:12px; margin-right:6px}
.select2-container .select2-selection--single{height:30px}
.select2-results__group{color: #eee; background-color: rgb(51,51,51)}
.select2-results__options .select2-results__option{color: #222; background-color: #ddd}
.select2-results__options .select2-results__option .ended{color: #888}
.select2-container--default .select2-results > .select2-results__options{max-height: 300px}
#select2-infosrc-lang-select-results .select2-results__option,
#select2-infosrc-lang-select-results .select2-results__group{padding-top: 2px !important; padding-bottom:2px !important}
#select2-infosrc-lang-select-results .select2-results__option--highlighted.select2-results__option--selectable .ended{color:white}
#select2-infosrc-lang-select-results .select2-results__option--selected,
#select2-infosrc-lang-select-results .select2-results__option--selected span{color:rgb(143, 21, 21) !important}
#select2-infosrc-lang-select-results span.flag{width:100%; height:100%; display:block}
</style>

#if $varExists('header')
<h1 class="header"><span class="grey-text">Edit </span>$header</h1>
#else
@@ -244,10 +262,10 @@
</div>

<div class="field-pair">
<label for="infosrc-lang-select-edit">
<label for="infosrc-lang-select">
<span class="component-title">Info language</span>
<span class="component-desc">
<select name="tvinfo_lang" id="infosrc-lang-select-edit" class="form-control form-control-inline input-sm"></select>
<select name="tvinfo_lang" id="infosrc-lang-select" class="form-control form-control-inline input-sm"></select>
<span>fetch show information in this language</span>
</span>
</label>

@@ -10,8 +10,10 @@
#set $mode = $kwargs and $kwargs.get('mode', '')
#set $use_network = $kwargs.get('use_networks', False)
#set $use_returning = 'returning' == mode
#set $use_votes = $kwargs and $kwargs.get('use_votes', True)
#set $use_filter = $kwargs and $kwargs.get('use_filter', True)
#set $use_ratings = $kwargs and $kwargs.get('use_ratings', True)
#set $use_votes = $kwargs and $kwargs.get('use_votes', True)
#set $term_vote = $kwargs and $kwargs.get('term_vote', 'Votes')
##
#set global $title='Browse %s Shows' % $browse_type
#set global $header='Browse Shows'
@@ -236,7 +238,8 @@ $(document).ready(function(){
    $('#showfilter').on('change', function(){
        var filterValue = this.value;
        if (-1 == filterValue.indexOf('trakt') && -1 == filterValue.indexOf('imdb') && -1 == filterValue.indexOf('mc_')
            && -1 == filterValue.indexOf('tvc_') && -1 == filterValue.indexOf('tvm_')
            && -1 == filterValue.indexOf('tmdb_') && -1 == filterValue.indexOf('tvc_')
            && -1 == filterValue.indexOf('tvm_')
            && -1 == filterValue.indexOf('ne_') && -1 == filterValue.indexOf('_ne')
            && -1 == filterValue.indexOf('default')) {
            var el$ = $('#container')
@@ -322,16 +325,16 @@ $(document).ready(function(){
</optgroup>
<optgroup label="Sort by">
<option value="by_name"#if 'by_name' in $saved_showsort_sortby and not $reset_showsort_sortby#$selected>> #else#>#end if#Name</option>
## omit for TVMaze as original order == First Aired
#if 'TVmaze' not in $browse_type
## omit for TVMaze as original order == First Aired
#if 'TVmaze' not in $browse_type
<option value="by_order"#if 'by_order' in $saved_showsort_sortby or $reset_showsort_sortby#$selected>> #else#>#end if#Original</option>
#end if
#end if
<option value="by_premiered"#if 'by_premiered' in $saved_showsort_sortby and not $reset_showsort_sortby#$selected>> #else#>#end if#First aired</option>
#if $use_returning
<option value="by_returning"#if 'by_returning' in $saved_showsort_sortby and not $reset_showsort_sortby#$selected>> #else#>#end if#Returning</option>
#end if
#if $use_votes
<option value="by_votes"#if 'by_votes' in $saved_showsort_sortby#$selected>> #else#>#end if#Votes</option>
<option value="by_votes"#if 'by_votes' in $saved_showsort_sortby#$selected>> #else#>#end if#$term_vote</option>
#end if
#if $use_ratings
<option value="by_rating"#if 'by_rating' in $saved_showsort_sortby#$selected>> #else#>#end if#% Rating</option>
@@ -342,13 +345,13 @@ $(document).ready(function(){
</optgroup>
</select>

#if 'Ani' not in $browse_type
#if 'Ani' not in $browse_type and $use_filter
<select id="showfilter" class="form-control form-control-inline input-sm">
#set $selected = ' class="selected" selected="selected"'
#if 'Trakt' == $browse_type
<optgroup label="Trakt">
<option value="trakt_anticipated"#echo ('', selected)['anticipated' == $mode]#>Anticipating</option>
<option value="trakt_newseasons"#echo ('', selected)['newseasons' == $mode]#>New Seasons</option>
<option value="trakt_newseasons"#echo ('', selected)['returning' == $mode]#>New Seasons</option>
<option value="trakt_newshows"#echo ('', selected)['newshows' == $mode]#>New Shows</option>
<option value="trakt_popular"#echo ('', selected)['popular' == $mode]#>Popular</option>
<option value="trakt_trending"#echo ('', selected)['trending' == $mode]#>Trending</option>
@@ -423,6 +426,14 @@ $(document).ready(function(){
<option value="mc_newseries?more=1"#echo ('', selected + ' disabled')[mode.endswith('more')]#>... list more</option>
#end if
</optgroup>
#elif 'TMDB' == $browse_type
<optgroup label="TMDB">
<option value="tmdb_upcoming"#echo ('', selected)['upcoming' == $mode]#>Upcoming</option>
<option value="tmdb_popular"#echo ('', selected)['popular' == $mode]#>Popular</option>
<option value="tmdb_toprated"#echo ('', selected)['toprated' == $mode]#>Top rated</option>
<option value="tmdb_trending_today"#echo ('', selected)['trending_today' == $mode]#>Trending today</option>
<option value="tmdb_trending_week"#echo ('', selected)['trending_week' == $mode]#>Trending this week</option>
</optgroup>
#elif 'TVCalendar' == $browse_type
<optgroup label="TVCalendar">
#for $page in $kwargs.get('pages') or []
@@ -466,10 +477,10 @@ $(document).ready(function(){
#set $poster_id += 1

#set $title_html = $this_show['title'].replace('"', '&quot;').replace("'", '&#39;')
#if 'newseasons' == $mode
#if 'returning' == $mode
#set $overview = '%s: %s' % (
('Season %s' % $this_show['episode_season'], 'Brand-new')[1 == $this_show['episode_season']],
($this_show['overview'], $this_show['episode_overview'])[any($this_show['episode_overview']) and 1 != $this_show['episode_season']])
'Season %s' % $this_show['episode_season'],
$this_show['episode_overview'] or $this_show['overview'])
#else
#set $overview = $this_show['overview']
#end if
@@ -477,16 +488,18 @@ $(document).ready(function(){
#set $show_id = $this_show.get('show_id')
#set $known = ('not', '')[bool($this_show.get('indb'))]
#set $hide = ('', '%shide ' % ('', 'to-')['.hide' in $saved_showsort_view])[bool($this_show.get('hide'))]
#if $use_ratings:
#set $data_rating = $try_float($this_show['rating'])
#end if

<div class="show-card ${hide}${known}inlibrary" data-name="#echo re.sub(r'([\'\"])', r'', $this_show['title'])#" data_id="$show_id"#if $use_ratings# data-rating="$data_rating"#end if##if $use_votes# data-votes="$this_show['votes']"#end if# data-premiered="$this_show['premiered']"#if $use_returning# data-returning="$this_show['returning']"#end if# data-order="$this_show['order']"#if $use_network# data-network="$this_show['network']"#end if#>
<div class="show-card ${hide}${known}inlibrary" data-name="#echo re.sub(r'([\'\"])', r'', $this_show['title'])#" data_id="$show_id"#if $use_ratings# data-rating="$data_rating"#end if##if $use_votes# data-votes="$this_show['votes']"#end if# data-premiered="$this_show['ord_premiered']"#if $use_returning# data-returning="$this_show['ord_returning']"#end if# data-order="$this_show['order']"#if $use_network# data-network="$this_show['network']"#end if#>
<div class="show-card-inner">
<div class="browse-image">
<a class="browse-image" href="<%= anon_url(this_show['url_src_db']) %>" target="_blank"
title="<span style='color: rgb(66, 139, 202)'>$re.sub(r'(?m)\s+\((?:19|20)\d\d\)\s*$', '', $title_html)</span>

#if $this_show['genres']#<br><div style='font-weight:bold'>(<em>$this_show['genres']</em>)</div>#end if#
#if $kwargs and $use_returning#<span style='display:block;clear:both;font-weight:bold;font-size:0.9em;color:#888'><em>Season $this_show['episode_season'] returns $this_show['returning_str']</em></span>#end if#
#if $kwargs and $use_returning#<span style='display:block;clear:both;font-weight:bold;font-size:0.9em;color:#888'><em>Season $this_show['episode_season'] return#echo ('s', 'ed')[$this_show['return_past']]# $this_show['str_returning']</em></span>#end if#
#if $this_show.get('country') or $this_show.get('language')
<p style='line-height:15px;margin-bottom:2px'>
#if $this_show.get('country')
@@ -498,7 +511,7 @@ $(document).ready(function(){
</p>
#end if
<p style='margin:0 0 2px'>#echo re.sub(r'([,\.!][^,\.!]*?)$', '...', re.sub(r'([!\?\.])(?=\w)', r'\1 ', $overview)).replace('.....', '...')#</p>
<p>#if $this_show['premiered_str']#<span style='font-weight:bold;font-size:0.9em;color:#888'><em>#if $kwargs and 'newseasons' == $mode#Air#else#First air#end if##echo ('s', 'ed')[$this_show['when_past']]#: $this_show['premiered_str']</em></span>#end if#
<p>#if $this_show['str_premiered']#<span style='font-weight:bold;font-size:0.9em;color:#888'><em>#if 'Trakt' == $browse_type and $kwargs and 'returning' == $mode#Air#else#First air#end if##echo ('s', 'ed')[$this_show['started_past']]#: $this_show['str_premiered']</em></span>#end if#
#if $this_show.get('ended_str')# - <span style='font-weight:bold;font-size:0.9em;color:#888'><em>Ended: $this_show['ended_str']</em></span>#end if#
#if $this_show.get('network')#<span style='display:block;clear:both;font-weight:bold;font-size:0.9em;color:#888'><em>On: $this_show['network']</em></span>#end if#
</p>
@@ -521,7 +534,7 @@ $(document).ready(function(){
#end if
<div class="clearfix">
#if $use_ratings or $use_votes
<p>#if $use_ratings#$this_show['rating']#if $re.search(r'^\d+(\.\d+)?$', (str($this_show['rating'])))#%#end if##end if##if $use_votes#<i class="heart icon-glyph"></i><i>$this_show['votes'] votes</i>#end if#</p>#slurp#
<p>#if $use_ratings#<span class="rating">$this_show['rating']#if $re.search(r'^\d+(\.\d+)?$', (str($this_show['rating'])))#%</span>#end if##end if##if $use_votes#<i class="heart icon-glyph"></i><i>$this_show['votes'] $term_vote.lower()</i>#end if#</p>#slurp#
#end if
#if 'url_tvdb' in $this_show and $this_show['url_tvdb']
<a class="service" href="<%= anon_url(this_show['url_tvdb']) %>" onclick="window.open(this.href, '_blank'); return false;"
@@ -539,7 +552,7 @@ $(document).ready(function(){
</div>
</div>
<div class="ui-progressbar ui-widget ui-widget-content ui-corner-all">
#set $state = 'progress-%s0" title="%s"' % (('2', 'Upcoming'), ('8', 'Started'))[$this_show['when_past']]
#set $state = 'progress-%s0" title="%s"' % (('2', 'Upcoming'), ('8', 'Started'))[$this_show['started_past']]
<div style="width:102%" class="ui-progressbar-value ui-widget-header ui-corner-left ui-corner-right $state></div>
</div>
</div>

@@ -35,6 +35,23 @@
<script type="text/javascript" src="$sbRoot/js/qualityChooser.js?v=$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/newShow.js?v=$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/addShowOptions.js?v=$sbPID"></script>
<script src="$sbRoot/js/lib/select2.full.min.js"></script>
<link href="$sbRoot/css/lib/select2.css" rel="stylesheet">

<style>
.select2-container{height:32px; font-size:12px}
.select2-container .select2-selection--single{height:30px}
.select2-results__group{color: #eee; background-color: rgb(51,51,51)}
.select2-results__options .select2-results__option{color: #222; background-color: #ddd}
.select2-results__options .select2-results__option .ended{color: #888}
.select2-container--default .select2-results > .select2-results__options{max-height: 300px}
#select2-infosrc-lang-select-results .select2-results__option,
#select2-infosrc-lang-select-results .select2-results__group{padding-top: 2px !important; padding-bottom:2px !important}
#select2-infosrc-lang-select-results .select2-results__option--highlighted.select2-results__option--selectable .ended{color:white}
#select2-infosrc-lang-select-results .select2-results__option--selected,
#select2-infosrc-lang-select-results .select2-results__option--selected span{color:rgb(143, 21, 21) !important}
#select2-infosrc-lang-select-results span.flag{width:100%; height:100%; display:block}
</style>

#if $varExists('header')
<h1 class="header">$header</h1>

@@ -1,5 +1,6 @@
#import sickgear
#import datetime
#import re
#import urllib
#from sickgear.common import Quality, SNATCHED_ANY, DOWNLOADED, ARCHIVED, FAILED
#from sickgear.helpers import anon_url
@@ -176,10 +177,29 @@
<li><a id="add-show-name" data-href="$sbRoot/add-shows/find/" tabindex="$tab#set $tab += 1#"><i class="sgicon-addshow"></i>
<input class="form-control form-control-inline input-sm" type="text" placeholder="Search" tabindex="$tab#set $tab += 1#">
<div class="menu-item-desc opacity60">find show at TV info source</div></a></li>
#set $tvm_modes = dict(tvm_premieres='new shows', tvm_returning='returning')
#set $tvm_mode = $tvm_modes.get($sg_var('TVM_MRU'), 'new shows')
<li><a href="$sbRoot/add-shows/tvm-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-tvmaze"></i>TVmaze Cards
<div class="menu-item-desc opacity60">$tvm_mode...</div></a></li>
#set $tmdb_modes = dict(tmdb_upcoming='upcoming', tmdb_popular='popular', tmdb_toprated='top rated', tmdb_trending_today='trending today', tmdb_trending_week='trending this week')
#set $tmdb_mode = $tmdb_modes.get($sg_var('TMDB_MRU'), 'upcoming')
<li><a href="$sbRoot/add-shows/tmdb-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-addshow"></i>TMDB Cards
<div class="menu-item-desc opacity60">$tmdb_mode...</div></a></li>
#set $trakt_modes = dict(trakt_anticipated='anticipated', trakt_returning='returning', trakt_newshows='premieres', trakt_popular='popular', trakt_trending='trending',
trakt_watched='most watched this month', trakt_watched_period_year='most watched this year',
trakt_played='most played this month', trakt_played_period_year='most played this year',
trakt_collected='most collected this month', trakt_collected_period_year='most collected this year',
trakt_recommended='recommended', trakt_watchlist='watchlist')
#set $trakt_mode = $trakt_modes.get(re.sub('[\?=]', '_', $sg_var('TRAKT_MRU')), 'trends, tailored suggestions')
<li><a href="$sbRoot/add-shows/trakt-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-trakt"></i>Trakt Cards
<div class="menu-item-desc opacity60">trends, tailored suggestions...</div></a></li>
<div class="menu-item-desc opacity60">$trakt_mode...</div></a></li>
#set $imdb_func = $sg_str('IMDB_MRU').split('-')
#set $imdb_mru, $params = ($imdb_func[0], '') if 2 > len($imdb_func) else $imdb_func
#set $period = ('', ' %s' % $params.replace(',', ' to '))['popular' == $imdb_mru]
#set $imdb_modes = dict(popular='popular', watchlist='watchlist')
#set $imdb_mode = $imdb_modes.get($imdb_mru, 'popular decades, watchlists...')
<li><a href="$sbRoot/add-shows/imdb-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-imdb"></i>IMDb Cards
<div class="menu-item-desc opacity60">popular decades, watchlists...</div></a></li>
<div class="menu-item-desc opacity60">$imdb_mode$period...</div></a></li>
#set $mc_modes = dict(mc_90days='last 90 days', mc_year='by year', mc_discussed='most discussed', mc_shared='most shared')
#set $mc_mode = $mc_modes.get($sg_var('MC_MRU'), 'new seasons')
<li><a href="$sbRoot/add-shows/mc-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-metac"></i>Metacritic Cards
@@ -188,10 +208,6 @@
#set $tvc_mode = $tvc_modes.get($sg_var('TVC_MRU'), 'new shows')
<li><a href="$sbRoot/add-shows/tvc-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-tvc"></i>TV Calendar Cards
<div class="menu-item-desc opacity60">$tvc_mode...</div></a></li>
#set $tvm_modes = dict(tvm_premieres='new shows', tvm_returning='returning')
#set $tvm_mode = $tvm_modes.get($sg_var('TVM_MRU'), 'new shows')
<li><a href="$sbRoot/add-shows/tvm-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-tvmaze"></i>TVmaze Cards
<div class="menu-item-desc opacity60">$tvm_mode...</div></a></li>
#set $ne_modes = dict(ne_newpop='new popular', ne_newtop='new top rated', ne_upcoming='upcoming', ne_trending='trending')
#set $ne_mode = $ne_modes.get($sg_var('NE_MRU'), 'new popular')
<li><a href="$sbRoot/add-shows/ne-default/" tabindex="$tab#set $tab += 1#"><i class="sgicon-ne"></i>Next Episode Cards

@@ -16,31 +16,73 @@ $(document).ready(function () {
        return ' class="flag" style="background-image:url(' + $.SickGear.Root + '/images/flags/' + lang + '.png)"'
    }

    $.getJSON($.SickGear.Root + '/add-shows/get-infosrc-languages', {}, function (data) {
        var result = '', currentLangAdded = '', selected = ' selected="selected"';
    function uriFlag(lang) {
        return $.SickGear.Root + '/images/flags/' + lang + '.png'
    }

    if (!data.results.length) {
        result = '<option value="' + config.showLang + '"' + selected + htmlFlag(config.showLang) + '>'
    $.getJSON($.SickGear.Root + '/add-shows/get-infosrc-languages', {}, function (data) {
        var htmlText = '', currentLangAdded = '',
            selected = ' selected="selected"', htmlSelected = '',
            elInfosrcLang = $('#infosrc-lang-select'),
            useSelect2 = 0 < data.results_ext.length, populateItem;

        if (!data.results.length && !data.results_ext.length) {
            htmlText = '<option value="' + config.showLang + '"' + selected + htmlFlag(config.showLang) + '>'
                + config.showLang + '</option>';
        } else {
            currentLangAdded = !1;
            if (useSelect2){
                // 3 letter abbr object
                $.each(data.results_ext, function (index, obj) {

                    htmlSelected = '';
                    if (obj.std_abbr === config.showLang) {
                        currentLangAdded = !0;
                        htmlSelected = selected;
                    }

                    htmlText += '<option style="padding-left:25px" value="' + obj.std_abbr + '"'
                        + ' data-abbr="' + obj.abbr + '"'
                        + ' data-img="' + uriFlag(obj.std_abbr) + '"'
                        + ' data-title="' + obj.en + ' (' + obj.orig_abbr + '/' + obj.std_abbr + '/' + obj.abbr + ')' + '"'
                        + (!!htmlSelected
                            ? htmlSelected + '>> '
                            : '>')
                        + obj.native
                        + '</option>';
                });
            } else {
                // legacy 2 letter abbr list
                $.each(data.results, function (index, strLang) {

                    var htmlSelected = '';
                    htmlSelected = '';
                    if (strLang === config.showLang) {
                        currentLangAdded = !0;
                        htmlSelected = selected;
                    }

                    result += '<option value="' + strLang + '"' + htmlSelected + htmlFlag(strLang) + '>'
                    htmlText += '<option value="' + strLang + '"' + htmlSelected + htmlFlag(strLang) + '>'
                        + strLang + '</option>';
                });

            }
            if (!currentLangAdded)
                result += '<option value="' + config.showLang + '" ' + selected + '>' + config.showLang + '</option>';
                htmlText += '<option value="' + config.showLang + '" ' + selected + '>' + config.showLang + '</option>';
        }

        $('#infosrc-lang-select-edit').html(result);
        elInfosrcLang.html(htmlText);

        if (useSelect2) {
            populateItem = function (data) {
                if (!!data.element)
                    return $('<span class="flag"'
                        + ' style="background-image:url(' + $(data.element).data('img') + ')"'
                        + ' title="' + $(data.element).data('title') + '">'
                        + data.text
                        + '</span>');
                return data.text;
            }
            elInfosrcLang.select2({templateResult: populateItem, templateSelection: populateItem, width: 162});
        }
    });

    function getExceptions() {

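A note on the data contract here: both language-select scripts branch on data.results_ext, so a non-empty extended list switches the UI to select2 while data.results keeps the legacy plain-option path. The JSON shape this implies, sketched as a Python literal (field names taken from the JS above; the endpoint payload itself is not shown in this diff, so values are illustrative):

    # Hypothetical /add-shows/get-infosrc-languages payload, shape inferred from the JS handlers.
    data = {
        'results': ['en', 'sv'],   # legacy 2-letter codes; drives the plain <option> branch
        'results_ext': [           # extended entries; any items here enable the select2 branch
            {'std_abbr': 'en',     # option value and flag image key
             'abbr': 'eng',        # stored in data-abbr
             'orig_abbr': 'en',    # shown in the data-title tooltip
             'en': 'English',      # English name, also used in the tooltip
             'native': 'English'}  # visible option label
        ],
    }
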
@@ -9,6 +9,10 @@ $(document).ready(function () {
        return ' class="flag" style="background-image:url(' + $.SickGear.Root + '/images/flags/' + lang + '.png)"'
    }

    function uriFlag(lang) {
        return $.SickGear.Root + '/images/flags/' + lang + '.png'
    }

    function populateLangSelect() {
        if (!$('#nameToSearch').length)
            return;
@@ -17,27 +21,58 @@ $(document).ready(function () {

        $.getJSON(sbRoot + '/add-shows/get-infosrc-languages', {}, function (data) {

            var resultStr = '', flag,
            var htmlText = '', flag,
                selected = ' selected="selected"',
                elInfosrcLang = $('#infosrc-lang-select');
                elInfosrcLang = $('#infosrc-lang-select'),
                useSelect2 = 0 < data.results_ext.length, populateItem;

            if (0 === data.results.length) {
                resultStr = '<option value="en"' + selected + '>> en</option>';
            if (0 === data.results.length && 0 === data.results_ext.length) {
                htmlText = '<option value="en"' + selected + '>> en</option>';
            } else {
                if (useSelect2) {
                    $('#nameToSearch').addClass('select2');
                    // 3 letter abbr object
                    $.each(data.results_ext, function (index, obj) {
                        htmlText += '<option style="padding-left:25px" value="' + obj.std_abbr + '"'
                            + ' data-abbr="' + obj.abbr + '"'
                            + ' data-img="' + uriFlag(obj.std_abbr) + '"'
                            + ' data-title="' + obj.en + ' (' + obj.orig_abbr + '/' + obj.std_abbr + '/' + obj.abbr + ')' + '"'
                            + ('' === htmlText
                                ? selected + '>> '
                                : '>')
                            + obj.native
                            + '</option>';
                    });
                } else {
                    // legacy 2 letter abbr list
                    $.each(data.results, function (index, obj) {
                        flag = htmlFlag(obj);
                        resultStr += '<option value="' + obj + '"'
                            + ('' === resultStr
                        htmlText += '<option value="' + obj + '"'
                            + ('' === htmlText
                                ? flag.replace('"flag', '"flag selected-text') + selected + '>> '
                                : flag + '>')
                            + obj + '</option>';
                    });
                }
            }

            elInfosrcLang.html(resultStr);
            elInfosrcLang.html(htmlText);
            elInfosrcLang.change(function () {
                searchIndexers();
            });

            if (useSelect2) {
                populateItem = function(data) {
                    if (!!data.element)
                        return $('<span class="flag"'
                            + ' style="background-image:url(' + $(data.element).data('img') + ')"'
                            + ' title="' + $(data.element).data('title') + '">'
                            + data.text
                            + '</span>');
                    return data.text;
                }
                elInfosrcLang.select2({templateResult: populateItem, templateSelection: populateItem, width: 155});
            }
        });
    }
}

@@ -13,15 +13,20 @@ import re
from bs4_parser import BS4Parser
from exceptions_helper import ex
from lib import imdbpie
# from lib.tvinfo_base.exceptions import BaseTVinfoShownotfound
from lib.tvinfo_base import PersonGenders, TVInfoBase, TVInfoIDs, TVInfoCharacter, TVInfoPerson, TVInfoShow, \
    TVINFO_IMDB
# , TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TVDB, TVINFO_TVRAGE, \
# TVINFO_FACEBOOK, TVINFO_INSTAGRAM, TVINFO_TWITTER, TVINFO_WIKIPEDIA
from lib.dateutil.parser import parser
from sg_helpers import get_url, try_int
# from lib.tvinfo_base.exceptions import BaseTVinfoShownotfound
from lib.tvinfo_base import (
    TVInfoCharacter, TVInfoPerson, PersonGenders, TVINFO_IMDB,
    # TVINFO_FACEBOOK, TVINFO_INSTAGRAM, TVINFO_TMDB, TVINFO_TRAKT,
    # TVINFO_TVDB, TVINFO_TVRAGE, TVINFO_TWITTER, TVINFO_WIKIPEDIA,
    TVInfoBase, TVInfoIDs, TVInfoShow)
from sg_helpers import clean_data, enforce_type, get_url, try_int
from json_helper import json_loads

from six import iteritems
from six.moves import http_client as httplib
from six.moves.urllib.parse import urlencode, urljoin, quote, unquote


# noinspection PyUnreachableCode
if False:
@@ -33,6 +38,37 @@ log = logging.getLogger('imdb.api')
log.addHandler(logging.NullHandler())


def _get_imdb(self, url, query=None, params=None):
    headers = {'Accept-Language': self.locale}
    if params:
        full_url = '{0}?{1}'.format(url, urlencode(params))
    else:
        full_url = url
    headers.update(self.get_auth_headers(full_url))
    resp = get_url(url, headers=headers, params=params, return_response=True)

    if not resp.ok:
        if resp.status_code == httplib.NOT_FOUND:
            raise LookupError('Resource {0} not found'.format(url))
        else:
            msg = '{0} {1}'.format(resp.status_code, resp.text)
            raise imdbpie.ImdbAPIError(msg)
    resp_data = resp.content.decode('utf-8')
    try:
        resp_dict = json_loads(resp_data)
    except ValueError:
        resp_dict = self._parse_dirty_json(
            data=resp_data, query=query
        )

    if resp_dict.get('error'):
        return None
    return resp_dict


imdbpie.Imdb._get = _get_imdb


class IMDbIndexer(TVInfoBase):
    # supported_id_searches = [TVINFO_IMDB]
    supported_person_id_searches = [TVINFO_IMDB]
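For context: the assignment imdbpie.Imdb._get = _get_imdb above rebinds the bundled library's private fetch method at import time, so every imdbpie request is routed through SickGear's get_url transport. A minimal standalone sketch of the same monkey-patch pattern (class and function names here are illustrative, not project code):

    class ThirdPartyClient(object):
        # stands in for imdbpie.Imdb
        def _get(self, url):
            raise RuntimeError('original transport, replaced at import time')

    def _patched_get(self, url):
        # the replacement routes the call through a shared HTTP helper instead
        return 'fetched %s via shared transport' % url

    # rebinding, same shape as: imdbpie.Imdb._get = _get_imdb
    ThirdPartyClient._get = _patched_get
    assert 'via shared transport' in ThirdPartyClient()._get('https://example.org')
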
@@ -67,12 +103,13 @@ class IMDbIndexer(TVInfoBase):
        """
        def _make_result_dict(s):
            imdb_id = try_int(re.search(r'tt(\d+)', s.get('id') or s.get('imdb_id')).group(1), None)
            tvs = TVInfoShow()
            tvs.seriesname, tvs.id, tvs.firstaired, tvs.genre_list, tvs.overview, tvs.poster, tvs.ids = \
                s['title'], imdb_id, s.get('releaseDetails', {}).get('date') or s.get('year'), s.get('genres'), \
                s.get('plot', {}).get('outline', {}).get('text'), s.get('image') and s['image'].get('url'), \
                TVInfoIDs(imdb=imdb_id)
            return tvs
            ti_show = TVInfoShow()
            ti_show.seriesname, ti_show.id, ti_show.firstaired, ti_show.genre_list, ti_show.overview, \
                ti_show.poster, ti_show.ids = \
                clean_data(s['title']), imdb_id, s.get('releaseDetails', {}).get('date') or s.get('year'), \
                s.get('genres'), enforce_type(clean_data(s.get('plot', {}).get('outline', {}).get('text')), str, ''), \
                s.get('image') and s['image'].get('url'), TVInfoIDs(imdb=imdb_id)
            return ti_show

        results = []
        if ids:
@@ -106,20 +143,20 @@ class IMDbIndexer(TVInfoBase):
    def _convert_person(person_obj, filmography=None, bio=None):
        if isinstance(person_obj, dict) and 'imdb_id' in person_obj:
            imdb_id = try_int(re.search(r'(\d+)', person_obj['imdb_id']).group(1))
            return TVInfoPerson(p_id=imdb_id, name=person_obj['name'], ids={TVINFO_IMDB: imdb_id})
            return TVInfoPerson(p_id=imdb_id, name=person_obj['name'], ids=TVInfoIDs(ids={TVINFO_IMDB: imdb_id}))
        characters = []
        for known_for in (filmography and filmography['filmography']) or []:
            if known_for['titleType'] not in ('tvSeries', 'tvMiniSeries'):
                continue
            for character in known_for.get('characters') or []:
                show = TVInfoShow()
                show.id = try_int(re.search(r'(\d+)', known_for.get('id')).group(1))
                show.ids.imdb = show.id
                show.seriesname = known_for.get('title')
                show.firstaired = known_for.get('year')
            for character in known_for.get('characters') or ['unknown name']:
                ti_show = TVInfoShow()
                ti_show.id = try_int(re.search(r'(\d+)', known_for.get('id')).group(1))
                ti_show.ids.imdb = ti_show.id
                ti_show.seriesname = known_for.get('title')
                ti_show.firstaired = known_for.get('year')
                characters.append(
                    TVInfoCharacter(name=character, show=show,
                                    start_year=known_for.get('startYear'), end_year=known_for.get('endYear'))
                    TVInfoCharacter(name=character, ti_show=ti_show, start_year=known_for.get('startYear'),
                                    end_year=known_for.get('endYear'))
                )
        try:
            birthdate = person_obj['base']['birthDate'] and tz_p.parse(person_obj['base']['birthDate']).date()
@@ -131,7 +168,7 @@ class IMDbIndexer(TVInfoBase):
            deathdate = None
        imdb_id = try_int(re.search(r'(\d+)', person_obj['id']).group(1))
        return TVInfoPerson(
            p_id=imdb_id, ids={TVINFO_IMDB: imdb_id}, characters=characters,
            p_id=imdb_id, ids=TVInfoIDs(ids={TVINFO_IMDB: imdb_id}), characters=characters,
            name=person_obj['base'].get('name'), real_name=person_obj['base'].get('realName'),
            nicknames=set((person_obj['base'].get('nicknames') and person_obj['base'].get('nicknames')) or []),
            akas=set((person_obj['base'].get('akas') and person_obj['base'].get('akas')) or []),
@@ -175,7 +212,8 @@ class IMDbIndexer(TVInfoBase):
            results.append(self._convert_person(cp))
        return results

    def _get_bio(self, p_id):
    @staticmethod
    def _get_bio(p_id):
        try:
            bio = get_url('https://www.imdb.com/name/nm%07d/bio' % p_id, headers={'Accept-Language': 'en'})
            if not bio:
@@ -217,4 +255,3 @@ class IMDbIndexer(TVInfoBase):
            self._set_cache_entry(cache_credits_key, fg)
        if p:
            return self._convert_person(p, filmography=fg, bio=bio)

@@ -8,6 +8,7 @@ __api_version__ = '1.0.0'

import datetime
import logging
import re

from lib import tmdbsimple
from lib.dateutil.parser import parser
@@ -18,13 +19,13 @@ from lib.tvinfo_base import CastList, PersonGenders, RoleTypes, \
    TVINFO_IMDB, TVINFO_TMDB, TVINFO_TVDB, \
    TVINFO_FACEBOOK, TVINFO_INSTAGRAM, TVINFO_TWITTER
from json_helper import json_dumps
from sg_helpers import clean_data, get_url, iterate_chunk, try_int
from sg_helpers import clean_data, enforce_type, get_url, iterate_chunk, try_int

from six import iteritems

# noinspection PyUnreachableCode
if False:
    from typing import Any, AnyStr, Dict, List, Optional
    from typing import Any, AnyStr, Dict, List, Optional, Union
    from six import integer_types

log = logging.getLogger('tmdb.api')
@@ -179,17 +180,19 @@ class TmdbIndexer(TVInfoBase):
        self.size_map = response.get('size_map')
        self.tv_genres = response.get('genres')

    def _search_show(self, name=None, ids=None, **kwargs):
        # type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow]
    def _search_show(self, name=None, ids=None, lang=None, **kwargs):
        # type: (Union[AnyStr, List[AnyStr]], Dict[integer_types, integer_types], Optional[string_types], Optional[Any]) -> List[Dict]
        """This searches TMDB for the series name.
        """
        tmdb_lang = ('en-US', lang)[lang in self._tmdb_supported_lang_list]

        def _make_result_dict(s):
            tvs = TVInfoShow()
            tvs.seriesname, tvs.id, tvs.seriesid, tvs.firstaired, tvs.genre_list, tvs.overview, tvs.poster, tvs.ids, \
                tvs.language, tvs.popularity, tvs.rating = \
            ti_show = TVInfoShow()
            ti_show.seriesname, ti_show.id, ti_show.seriesid, ti_show.firstaired, ti_show.genre_list, \
                ti_show.overview, ti_show.poster, ti_show.ids, ti_show.language, ti_show.popularity, ti_show.rating = \
                clean_data(s['name']), s['id'], s['id'], clean_data(s.get('first_air_date')) or None, \
                clean_data([self.tv_genres.get(g) for g in s.get('genre_ids') or []]), \
                clean_data(s.get('overview')), s.get('poster_path') and '%s%s%s' % (
                self._enforce_text(s.get('overview')), s.get('poster_path') and '%s%s%s' % (
                    self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original],
                    s.get('poster_path')), \
                TVInfoIDs(tvdb=s.get('external_ids') and s['external_ids'].get('tvdb_id'),
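The tmdb_lang line above uses the tuple-subscript idiom that recurs throughout this codebase: a boolean indexes a two-item tuple, so False (0) picks the fallback and True (1) picks the requested value. A standalone illustration (the supported list here is made up, not the real TMDB list):

    supported = ['en-US', 'de-DE', 'fr-FR']  # illustrative only

    def pick_lang(lang):
        # equivalent to: lang if lang in supported else 'en-US'
        return ('en-US', lang)[lang in supported]

    assert 'de-DE' == pick_lang('de-DE')
    assert 'en-US' == pick_lang('xx-XX')  # unsupported language falls back
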
@@ -197,8 +200,8 @@ class TmdbIndexer(TVInfoBase):
                          imdb=s.get('external_ids') and s['external_ids'].get('imdb_id') and
                          try_int(s['external_ids'].get('imdb_id', '').replace('tt', ''), None)), \
                clean_data(s.get('original_language')), s.get('popularity'), s.get('vote_average')
            tvs.genre = '|'.join(tvs.genre_list or [])
            return tvs
            ti_show.genre = '|'.join(ti_show.genre_list or [])
            return ti_show

        results = []
        if ids:
@@ -209,7 +212,7 @@ class TmdbIndexer(TVInfoBase):
                is_none, shows = self._get_cache_entry(cache_id_key)
                if not self.config.get('cache_search') or (None is shows and not is_none):
                    try:
                        show = tmdbsimple.TV(id=p).info(append_to_response='external_ids')
                        show = tmdbsimple.TV(id=p).info(append_to_response='external_ids', language=tmdb_lang)
                    except (BaseException, Exception):
                        continue
                    self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire)
@@ -223,10 +226,10 @@ class TmdbIndexer(TVInfoBase):
                if not self.config.get('cache_search') or (None is shows and not is_none):
                    try:
                        show = tmdbsimple.Find(id=(p, 'tt%07d' % p)[t == TVINFO_IMDB]).info(
                            external_source=id_map[t])
                            external_source=id_map[t], language=tmdb_lang)
                        if show.get('tv_results') and 1 == len(show['tv_results']):
                            show = tmdbsimple.TV(id=show['tv_results'][0]['id']).info(
                                append_to_response='external_ids')
                                append_to_response='external_ids', language=tmdb_lang)
                    except (BaseException, Exception):
                        continue
                    self._set_cache_entry(cache_id_key, show, expire=self.search_cache_expire)
@@ -241,7 +244,7 @@ class TmdbIndexer(TVInfoBase):
                is_none, shows = self._get_cache_entry(cache_name_key)
                if not self.config.get('cache_search') or (None is shows and not is_none):
                    try:
                        shows = tmdbsimple.Search().tv(query=n)
                        shows = tmdbsimple.Search().tv(query=n, language=tmdb_lang)
                        self._set_cache_entry(cache_name_key, shows, expire=self.search_cache_expire)
                        results.extend([_make_result_dict(s) for s in shows.get('results') or []])
                    except (BaseException, Exception) as e:
@@ -252,32 +255,23 @@ class TmdbIndexer(TVInfoBase):
        results = [seen.add(r.id) or r for r in results if r.id not in seen]
        return results

    def _convert_person_obj(self, person_obj):
        gender = PersonGenders.tmdb_map.get(person_obj.get('gender'), PersonGenders.unknown)
    def _convert_person_obj(self, tmdb_person_obj):
        gender = PersonGenders.tmdb_map.get(tmdb_person_obj.get('gender'), PersonGenders.unknown)
        try:
            birthdate = person_obj.get('birthday') and tz_p.parse(person_obj.get('birthday')).date()
            birthdate = tmdb_person_obj.get('birthday') and tz_p.parse(tmdb_person_obj.get('birthday')).date()
        except (BaseException, Exception):
            birthdate = None
        try:
            deathdate = person_obj.get('deathday') and tz_p.parse(person_obj.get('deathday')).date()
            deathdate = tmdb_person_obj.get('deathday') and tz_p.parse(tmdb_person_obj.get('deathday')).date()
        except (BaseException, Exception):
            deathdate = None

        cast = person_obj.get('cast') or person_obj.get('tv_credits', {}).get('cast')
        person_imdb_id = tmdb_person_obj.get('imdb_id') and try_int(tmdb_person_obj['imdb_id'].replace('nm', ''), None)
        person_ids = {TVINFO_TMDB: tmdb_person_obj.get('id')}
        if person_imdb_id:
            person_ids.update({TVINFO_IMDB: person_imdb_id})

        characters = []
        for character in cast or []:
            show = TVInfoShow()
            show.id = character.get('id')
            show.ids = TVInfoIDs(ids={TVINFO_TMDB: show.id})
            show.seriesname = clean_data(character.get('original_name'))
            show.overview = clean_data(character.get('overview'))
            show.firstaired = clean_data(character.get('first_air_date'))
            characters.append(
                TVInfoCharacter(name=clean_data(character.get('character')), show=show)
            )

        pi = person_obj.get('images')
        pi = tmdb_person_obj.get('images')
        image_url, main_image, thumb_url, main_thumb, image_list = None, None, None, None, []
        if pi:
            for i in sorted(pi['profiles'], key=lambda a: a['vote_average'] or 0, reverse=True):
@@ -308,20 +302,62 @@ class TmdbIndexer(TVInfoBase):
                        rating=i['vote_average'],
                        votes=i['vote_count']
                    ))
        elif tmdb_person_obj.get('profile_path'):
            main_image = '%s%s%s' % (
                self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original],
                tmdb_person_obj['profile_path'])
            main_thumb = '%s%s%s' % (
                self.img_base_url, self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.medium],
                tmdb_person_obj['profile_path'])

        person_imdb_id = person_obj.get('imdb_id') and try_int(person_obj['imdb_id'].replace('nm', ''), None)
        person_ids = {TVINFO_TMDB: person_obj.get('id')}
        if person_imdb_id:
            person_ids.update({TVINFO_IMDB: person_imdb_id})
        return TVInfoPerson(
            p_id=person_obj.get('id'), ids=person_ids, characters=characters,
            name=clean_data(person_obj.get('name')), akas=clean_data(set(person_obj.get('also_known_as') or [])),
            bio=clean_data(person_obj.get('biography')), gender=gender,
        _it_person_obj = TVInfoPerson(
            p_id=tmdb_person_obj.get('id'), ids=TVInfoIDs(ids=person_ids), name=clean_data(tmdb_person_obj.get('name')),
            akas=clean_data(set(tmdb_person_obj.get('also_known_as') or [])),
            bio=clean_data(tmdb_person_obj.get('biography')), gender=gender,
            image=main_image, images=image_list, thumb_url=main_thumb,
            birthdate=birthdate, birthplace=clean_data(person_obj.get('place_of_birth')),
            deathdate=deathdate, homepage=person_obj.get('homepage')
            birthdate=birthdate, birthplace=clean_data(tmdb_person_obj.get('place_of_birth')),
            deathdate=deathdate, homepage=tmdb_person_obj.get('homepage')
        )

        cast = tmdb_person_obj.get('cast') or tmdb_person_obj.get('tv_credits', {}).get('cast') or \
            tmdb_person_obj.get('known_for')

        characters = []
        for character in cast or []:
            ti_show = TVInfoShow()
            ti_show.id = character.get('id')
            ti_show.ids = TVInfoIDs(ids={TVINFO_TMDB: ti_show.id})
            ti_show.seriesname = enforce_type(clean_data(character.get('original_name')), str, '')
            ti_show.overview = self._enforce_text(character.get('overview'))
            ti_show.firstaired = clean_data(character.get('first_air_date'))
            ti_show.language = clean_data(character.get('original_language'))
            ti_show.genre_list = []
            for g in character.get('genre_ids') or []:
                if g in self.tv_genres:
                    ti_show.genre_list.append(self.tv_genres.get(g))
            ti_show.genre = '|'.join(ti_show.genre_list)
            if character.get('poster_path'):
                ti_show.poster = '%s%s%s' % \
                    (self.img_base_url,
                     self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original],
                     character['poster_path'])
                ti_show.poster_thumb = '%s%s%s' % \
                    (self.img_base_url,
                     self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.medium],
                     character['poster_path'])
            if character.get('backdrop_path'):
                ti_show.fanart = '%s%s%s' % \
                    (self.img_base_url,
                     self.size_map[TVInfoImageType.person_poster][TVInfoImageSize.original],
                     character['backdrop_path'])
            characters.append(
                TVInfoCharacter(name=clean_data(character.get('character')), ti_show=ti_show, person=[_it_person_obj],
                                episode_count=character.get('episode_count'))
            )

        _it_person_obj.characters = characters
        return _it_person_obj

    def _search_person(self, name=None, ids=None):
        # type: (AnyStr, Dict[integer_types, integer_types]) -> List[TVInfoPerson]
        """
@@ -408,31 +444,33 @@ class TmdbIndexer(TVInfoBase):
    def _convert_show(self, show_dict, show_obj=None):
        # type: (Dict, TVInfoShow) -> TVInfoShow
        if None is show_obj:
            tv_s = TVInfoShow()
            ti_show = TVInfoShow()
        else:
            tv_s = show_obj
            ti_show = show_obj
        if show_dict:
            tv_s.seriesname = clean_data(show_dict.get('name') or show_dict.get('original_name')
            ti_show.seriesname = clean_data(show_dict.get('name') or show_dict.get('original_name')
                                           or show_dict.get('original_title'))
            org_title = clean_data(show_dict.get('original_name') or show_dict.get('original_title'))
            if org_title != tv_s.seriesname:
                tv_s.aliases = [org_title]
            tv_s.id = show_dict.get('id')
            tv_s.seriesid = tv_s.id
            tv_s.language = clean_data(show_dict.get('original_language'))
            tv_s.overview = clean_data(show_dict.get('overview'))
            tv_s.status = clean_data(show_dict.get('status', ''))
            tv_s.show_type = clean_data((show_dict.get('type') and [show_dict['type']]) or [])
            tv_s.firstaired = clean_data(show_dict.get('first_air_date'))
            tv_s.vote_count = show_dict.get('vote_count')
            tv_s.vote_average = show_dict.get('vote_average')
            tv_s.popularity = show_dict.get('popularity')
            tv_s.origin_countries = clean_data(show_dict.get('origin_country') or [])
            tv_s.genre_list = []
            if org_title != ti_show.seriesname:
                ti_show.aliases = [org_title]
            ti_show.id = show_dict.get('id')
            ti_show.seriesid = ti_show.id
            ti_show.language = clean_data(show_dict.get('original_language'))
            ti_show.spoken_languages = [_l['iso_639_1'] for _l in show_dict.get('spoken_languages') or []]
            ti_show.overview = self._enforce_text(show_dict.get('overview'))
            ti_show.status = clean_data(show_dict.get('status', ''))
            ti_show.show_type = clean_data((show_dict.get('type') and [show_dict['type']]) or [])
            ti_show.firstaired = clean_data(show_dict.get('first_air_date'))
            ti_show.popularity = show_dict.get('popularity')
            ti_show.vote_count = show_dict.get('vote_count')
            ti_show.vote_average = show_dict.get('vote_average')
            ti_show.origin_countries = show_dict.get('origin_country') or []
            ti_show.genre_list = []
            ti_show.origin_countries = clean_data(show_dict.get('origin_country') or [])
            for g in show_dict.get('genre_ids') or []:
                if g in self.tv_genres:
                    tv_s.genre_list.append(self.tv_genres.get(g))
            tv_s.genre = '|'.join(tv_s.genre_list)
                    ti_show.genre_list.append(self.tv_genres.get(g))
            ti_show.genre = '|'.join(ti_show.genre_list)
            runtime = None
            for r in sorted(show_dict.get('episode_run_time') or [], reverse=True):
                if 40 < r < 50:
@@ -443,18 +481,18 @@ class TmdbIndexer(TVInfoBase):
                    break
            if not runtime and show_dict.get('episode_run_time'):
                runtime = max(show_dict.get('episode_run_time') or [0]) or None
            tv_s.runtime = runtime
            ti_show.runtime = runtime

            tv_s.networks = [
            ti_show.networks = [
                TVInfoNetwork(name=clean_data(n.get('name')), n_id=n.get('id'),
                              country_code=clean_data(n.get('origin_country')))
                for n in reversed(show_dict.get('networks') or [])
            ]

            if show_dict.get('networks'):
                tv_s.network = clean_data(show_dict['networks'][-1]['name'])
                tv_s.network_id = show_dict['networks'][-1].get('id')
                tv_s.network_country_code = clean_data(show_dict['networks'][-1].get('origin_country'))
                ti_show.network = clean_data(show_dict['networks'][-1]['name'])
                ti_show.network_id = show_dict['networks'][-1].get('id')
                ti_show.network_country_code = clean_data(show_dict['networks'][-1].get('origin_country'))

            image_url = show_dict.get('poster_path') and '%s%s%s' % \
                (self.img_base_url, self.size_map[TVInfoImageType.poster][TVInfoImageSize.original],
@@ -465,19 +503,20 @@ class TmdbIndexer(TVInfoBase):
            backdrop_url = show_dict.get('backdrop_path') and '%s%s%s' % \
                (self.img_base_url, self.size_map[TVInfoImageType.fanart][TVInfoImageSize.original],
                 show_dict.get('backdrop_path'))
            tv_s.ids = TVInfoIDs(tvdb=show_dict.get('external_ids', {}).get('tvdb_id'),
            ti_show.ids = TVInfoIDs(tvdb=show_dict.get('external_ids', {}).get('tvdb_id'),
                                 tmdb=show_dict['id'],
                                 rage=show_dict.get('external_ids', {}).get('tvrage_id'),
                                 imdb=show_dict.get('external_ids', {}).get('imdb_id') and
                                 try_int(show_dict.get('external_ids', {}).get('imdb_id', '').replace('tt', ''), None))
            tv_s.social_ids = TVInfoSocialIDs(twitter=show_dict.get('external_ids', {}).get('twitter_id'),
                                 imdb=show_dict.get('external_ids', {}).get('imdb_id')
                                 and try_int(
                                     show_dict.get('external_ids', {}).get('imdb_id', '').replace('tt', ''), None))
            ti_show.social_ids = TVInfoSocialIDs(twitter=show_dict.get('external_ids', {}).get('twitter_id'),
                                              instagram=show_dict.get('external_ids', {}).get('instagram_id'),
                                              facebook=show_dict.get('external_ids', {}).get('facebook_id'))

            tv_s.poster = image_url
            tv_s.poster_thumb = thumb_image_url
            tv_s.fanart = backdrop_url
            return tv_s
            ti_show.poster = image_url
            ti_show.poster_thumb = thumb_image_url
            ti_show.fanart = backdrop_url
            return ti_show

    def _get_show_list(self, src_method, result_count, **kwargs):
        result = []
@@ -497,7 +536,26 @@ class TmdbIndexer(TVInfoBase):
                pass
        return result[:result_count]

    def get_similar(self, tvid, result_count=100, **kwargs):
        # type: (integer_types, int, Any) -> List[TVInfoShow]
        """
        list of shows similar to the provided tv id
        :param tvid: id to find similar shows for
        :param result_count: result count to return
        """
        return self._get_show_list(tmdbsimple.TV(id=tvid).similar, result_count)

    def get_recommended_for_show(self, tvid, result_count=100, **kwargs):
        # type: (integer_types, int, Any) -> List[TVInfoShow]
        """
        list of shows recommended for the provided tv id
        :param tvid: id to find recommended shows for
        :param result_count: result count to return
        """
        return self._get_show_list(tmdbsimple.TV(id=tvid).recommendations, result_count)

    def get_trending(self, result_count=100, time_window='day', **kwargs):
        # type: (int, str, Any) -> List[TVInfoShow]
        """
        list of trending tv shows for day or week
        :param result_count:
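Both getters are thin wrappers that hand a bound tmdbsimple endpoint callable to _get_show_list, which fetches and converts the results. A hedged usage sketch (assumes tmdbsimple is configured with an API key elsewhere; the id and counts are illustrative):

    indexer = TmdbIndexer()
    similar = indexer.get_similar(1399, result_count=10)  # shows similar to TMDB id 1399
    recommended = indexer.get_recommended_for_show(1399, result_count=10)
    for ti_show in similar:
        print(ti_show.seriesname, ti_show.firstaired)
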
@@ -507,12 +565,15 @@ class TmdbIndexer(TVInfoBase):
        return self._get_show_list(tmdbsimple.Trending(media_type='tv', time_window=t_windows).info, result_count)

    def get_popular(self, result_count=100, **kwargs):
        # type: (int, Any) -> List[TVInfoShow]
        return self._get_show_list(tmdbsimple.TV().popular, result_count)

    def get_top_rated(self, result_count=100, **kwargs):
        # type: (int, Any) -> List[TVInfoShow]
        return self._get_show_list(tmdbsimple.TV().top_rated, result_count)

    def discover(self, result_count=100, **kwargs):
        # type: (int, Any) -> List[TVInfoShow]
        """
        Discover TV shows by different types of data like average rating,
        number of votes, genres, the network they aired on and air dates.
@@ -595,6 +656,12 @@ class TmdbIndexer(TVInfoBase):

        :param result_count:
        """
        if not kwargs:
            # use default if no kwargs are set = return all future airdate shows with language set to 'en'
            kwargs.update({'sort_by': 'first_air_date.asc',
                           'first_air_date.gte': datetime.date.today().strftime('%Y-%m-%d'),
                           'with_original_language': 'en',
                           })
        return self._get_show_list(tmdbsimple.Discover().tv, result_count, **kwargs)

    def _get_show_data(self, sid, language, get_ep_info=False, banners=False, posters=False, seasons=False,
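Note the defaults only apply when no kwargs at all are passed; supplying any kwarg suppresses all three. Because several TMDB discover keys contain dots, overrides have to be given via an unpacked dict rather than bare keyword arguments. A sketch of an explicit call (parameter values are illustrative):

    shows = indexer.discover(result_count=50, **{
        'sort_by': 'popularity.desc',
        'first_air_date.gte': '2024-01-01',  # dotted keys cannot be written as bare kwargs
        'with_original_language': 'de',
    })
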
@@ -606,9 +673,9 @@ class TmdbIndexer(TVInfoBase):
        tmdb_lang = ('en-US', language)[language in self._tmdb_supported_lang_list]
        if any((banners, posters, seasons, seasonwides, fanart)):
            to_append.append('images')
        if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False):
        if (actors or self.config['actors_enabled']) and not getattr(self.ti_shows.get(sid), 'actors_loaded', False):
            to_append.append('aggregate_credits')
        if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False):
        if get_ep_info and not getattr(self.ti_shows.get(sid), 'ep_loaded', False):
            to_append.append('episode_groups')
        try:
            tmdb = tmdbsimple.TV(sid)
@@ -624,7 +691,7 @@ class TmdbIndexer(TVInfoBase):
                self.show_not_found = True
            return False

        show_obj = self.shows[sid]
        show_obj = self.ti_shows[sid]

        self._convert_show(show_data, show_obj)

@@ -656,7 +723,7 @@ class TmdbIndexer(TVInfoBase):
        )

        season_cast_objs = {}
        if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False):
        if (actors or self.config['actors_enabled']) and not getattr(self.ti_shows.get(sid), 'actors_loaded', False):
            cast, show_obj.actors_loaded = CastList(), True
            if isinstance(show_data.get('aggregate_credits'), dict) and 'cast' in show_data['aggregate_credits'] and\
                    isinstance(show_data['aggregate_credits']['cast'], list):
@@ -692,6 +759,7 @@ class TmdbIndexer(TVInfoBase):
                        person=[
                            TVInfoPerson(
                                p_id=person_obj['id'], name=clean_data(person_obj['name']),
                                ids=TVInfoIDs(ids={TVINFO_TMDB: person_obj['id']}),
                                image='%s%s%s' % (
                                    self.img_base_url,
                                    self.size_map[TVInfoImageType.person_poster][
@@ -723,7 +791,7 @@ class TmdbIndexer(TVInfoBase):
                },
            } for ch in cast[RoleTypes.ActorMain]]

        if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False):
        if get_ep_info and not getattr(self.ti_shows.get(sid), 'ep_loaded', False):
            show_obj.ep_loaded = True
            seasons = ['season/%d' % s['season_number'] for s in show_data.get('seasons') or []]
            # call limited to 20 seasons per call
@ -778,3 +846,19 @@ class TmdbIndexer(TVInfoBase):
|
|||
else:
|
||||
TmdbIndexer._supported_languages = []
|
||||
TmdbIndexer._tmdb_lang_list = []
|
||||
|
||||
@staticmethod
|
||||
def _enforce_text(text):
|
||||
"""
|
||||
Set nonsense text to an enforced type
|
||||
:param text:
|
||||
:type text: AnyStr
|
||||
:return:
|
||||
:rtype: AnyStr
|
||||
"""
|
||||
text = enforce_type(clean_data(text), str, '').strip()
|
||||
tmp = text.lower()
|
||||
if 'details here' == tmp \
|
||||
or re.search(r'no(\s\w+){1,2}\savailable', tmp):
|
||||
return ''
|
||||
return text
|
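# Behaviour sketch for _enforce_text (inputs chosen for illustration, and
# assuming enforce_type falls back to '' for non-str input):
#
#   _enforce_text('Details here')           # -> ''  (known placeholder)
#   _enforce_text('No overview available')  # -> ''  (matches the regex)
#   _enforce_text(None)                     # -> ''  (type fallback)
#   _enforce_text('  A real synopsis. ')    # -> 'A real synopsis.'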
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
import datetime
|
||||
import logging
|
||||
import re
|
||||
from .exceptions import TraktException
|
||||
from .exceptions import TraktException, TraktAuthException
|
||||
from exceptions_helper import ConnectionSkipException, ex
|
||||
from six import iteritems
|
||||
from .trakt import TraktAPI
|
||||
from lib.tvinfo_base.exceptions import BaseTVinfoShownotfound
|
||||
from lib.tvinfo_base import TVInfoBase, TVINFO_TRAKT, TVINFO_TMDB, TVINFO_TVDB, TVINFO_TVRAGE, TVINFO_IMDB, \
|
||||
TVINFO_SLUG, TVInfoPerson, TVINFO_TWITTER, TVINFO_FACEBOOK, TVINFO_WIKIPEDIA, TVINFO_INSTAGRAM, TVInfoCharacter, TVInfoShow, \
|
||||
TVInfoIDs, TVINFO_TRAKT_SLUG
|
||||
from sg_helpers import try_int
|
||||
TVINFO_SLUG, TVInfoPerson, TVINFO_TWITTER, TVINFO_FACEBOOK, TVINFO_WIKIPEDIA, TVINFO_INSTAGRAM, TVInfoCharacter, \
|
||||
TVInfoShow, TVInfoIDs, TVInfoSocialIDs, TVINFO_TRAKT_SLUG, TVInfoEpisode, TVInfoSeason, RoleTypes
|
||||
from sg_helpers import clean_data, enforce_type, try_int
|
||||
from lib.dateutil.parser import parser
|
||||
|
||||
# noinspection PyUnreachableCode
|
||||
|
@ -33,6 +34,7 @@ log.addHandler(logging.NullHandler())
|
|||
|
||||
|
||||
def _convert_imdb_id(src, s_id):
|
||||
# type: (int, integer_types) -> integer_types
|
||||
if TVINFO_IMDB == src:
|
||||
try:
|
||||
return try_int(re.search(r'(\d+)', s_id).group(1), s_id)
|
||||
|
@ -100,16 +102,29 @@ class TraktIndexer(TVInfoBase):
|
|||
|
||||
@staticmethod
|
||||
def _make_result_obj(shows, results):
|
||||
# type: (List[Dict], List[TVInfoShow]) -> None
|
||||
if shows:
|
||||
try:
|
||||
for s in shows:
|
||||
if s['ids']['trakt'] not in [i['ids'].trakt for i in results]:
|
||||
s['id'] = s['ids']['trakt']
|
||||
s['ids'] = TVInfoIDs(
|
||||
trakt=s['ids']['trakt'], tvdb=s['ids']['tvdb'], tmdb=s['ids']['tmdb'],
|
||||
ti_show = TVInfoShow()
|
||||
countries = clean_data(s['country'])
|
||||
if countries:
|
||||
countries = [countries]
|
||||
else:
|
||||
countries = []
|
||||
ti_show.id, ti_show.seriesname, ti_show.overview, ti_show.firstaired, ti_show.airs_dayofweek, \
|
||||
ti_show.runtime, ti_show.network, ti_show.origin_countries, ti_show.official_site, \
|
||||
ti_show.status, ti_show.rating, ti_show.genre_list, ti_show.ids = s['ids']['trakt'], \
|
||||
clean_data(s['title']), enforce_type(clean_data(s['overview']), str, ''), s['firstaired'], \
|
||||
(isinstance(s['airs'], dict) and s['airs']['day']) or '', \
|
||||
s['runtime'], s['network'], countries, s['homepage'], s['status'], s['rating'], \
|
||||
s['genres_list'], \
|
||||
TVInfoIDs(trakt=s['ids']['trakt'], tvdb=s['ids']['tvdb'], tmdb=s['ids']['tmdb'],
|
||||
rage=s['ids']['tvrage'],
|
||||
imdb=s['ids']['imdb'] and try_int(s['ids']['imdb'].replace('tt', ''), None))
|
||||
results.append(s)
|
||||
ti_show.genre = '|'.join(ti_show.genre_list or [])
|
||||
results.append(ti_show)
|
||||
except (BaseException, Exception) as e:
|
||||
log.debug('Error creating result dict: %s' % ex(e))
|
||||
|
||||
|
@ -119,7 +134,7 @@ class TraktIndexer(TVInfoBase):
|
|||
If a custom_ui UI is configured, it uses this to select the correct
|
||||
series.
|
||||
"""
|
||||
results = []
|
||||
results = [] # type: List[TVInfoShow]
|
||||
if ids:
|
||||
for t, p in iteritems(ids):
|
||||
if t in self.supported_id_searches:
|
||||
|
@ -168,13 +183,13 @@ class TraktIndexer(TVInfoBase):
|
|||
else:
|
||||
self._make_result_obj(all_series, results)
|
||||
|
||||
final_result = []
|
||||
final_result = [] # type: List[TVInfoShow]
|
||||
seen = set()
|
||||
film_type = re.compile(r'(?i)films?\)$')
|
||||
for r in results:
|
||||
if r['id'] not in seen:
|
||||
seen.add(r['id'])
|
||||
title = r.get('title') or ''
|
||||
if r.id not in seen:
|
||||
seen.add(r.id)
|
||||
title = r.seriesname or ''
|
||||
if not film_type.search(title):
|
||||
final_result.append(r)
|
||||
else:
|
||||
|
@ -247,17 +262,19 @@ class TraktIndexer(TVInfoBase):
|
|||
deathdate=deathdate,
|
||||
homepage=person_obj['homepage'],
|
||||
birthplace=person_obj['birthplace'],
|
||||
social_ids={TVINFO_TWITTER: person_obj['social_ids']['twitter'],
|
||||
social_ids=TVInfoSocialIDs(
|
||||
ids={TVINFO_TWITTER: person_obj['social_ids']['twitter'],
|
||||
TVINFO_FACEBOOK: person_obj['social_ids']['facebook'],
|
||||
TVINFO_INSTAGRAM: person_obj['social_ids']['instagram'],
|
||||
TVINFO_WIKIPEDIA: person_obj['social_ids']['wikipedia']
|
||||
},
|
||||
ids={TVINFO_TRAKT: person_obj['ids']['trakt'], TVINFO_SLUG: person_obj['ids']['slug'],
|
||||
}),
|
||||
ids=TVInfoIDs(ids={
|
||||
TVINFO_TRAKT: person_obj['ids']['trakt'], TVINFO_SLUG: person_obj['ids']['slug'],
|
||||
TVINFO_IMDB:
|
||||
person_obj['ids']['imdb'] and
|
||||
try_int(person_obj['ids']['imdb'].replace('nm', ''), None),
|
||||
TVINFO_TMDB: person_obj['ids']['tmdb'],
|
||||
TVINFO_TVRAGE: person_obj['ids']['tvrage']})
|
||||
TVINFO_TVRAGE: person_obj['ids']['tvrage']}))
|
||||
|
||||
def get_person(self, p_id, get_show_credits=False, get_images=False, **kwargs):
|
||||
# type: (integer_types, bool, bool, Any) -> Optional[TVInfoPerson]
|
||||
|
@ -279,7 +296,7 @@ class TraktIndexer(TVInfoBase):
|
|||
if not urls:
|
||||
return
|
||||
|
||||
result = None
|
||||
result = None # type: Optional[TVInfoPerson]
|
||||
|
||||
for url, show_credits in urls:
|
||||
try:
|
||||
|
@ -292,25 +309,25 @@ class TraktIndexer(TVInfoBase):
|
|||
if show_credits:
|
||||
pc = []
|
||||
for c in resp.get('cast') or []:
|
||||
show = TVInfoShow()
|
||||
show.id = c['show']['ids'].get('trakt')
|
||||
show.seriesname = c['show']['title']
|
||||
show.ids = TVInfoIDs(ids={id_map[src]: _convert_imdb_id(id_map[src], sid)
|
||||
ti_show = TVInfoShow()
|
||||
ti_show.id = c['show']['ids'].get('trakt')
|
||||
ti_show.seriesname = c['show']['title']
|
||||
ti_show.ids = TVInfoIDs(ids={id_map[src]: _convert_imdb_id(id_map[src], sid)
|
||||
for src, sid in iteritems(c['show']['ids']) if src in id_map})
|
||||
show.network = c['show']['network']
|
||||
show.firstaired = c['show']['first_aired']
|
||||
show.overview = c['show']['overview']
|
||||
show.status = c['show']['status']
|
||||
show.imdb_id = c['show']['ids'].get('imdb')
|
||||
show.runtime = c['show']['runtime']
|
||||
show.genre_list = c['show']['genres']
|
||||
ti_show.network = c['show']['network']
|
||||
ti_show.firstaired = c['show']['first_aired']
|
||||
ti_show.overview = enforce_type(clean_data(c['show']['overview']), str, '')
|
||||
ti_show.status = c['show']['status']
|
||||
ti_show.imdb_id = c['show']['ids'].get('imdb')
|
||||
ti_show.runtime = c['show']['runtime']
|
||||
ti_show.genre_list = c['show']['genres']
|
||||
for ch in c.get('characters') or []:
|
||||
pc.append(
|
||||
TVInfoCharacter(
|
||||
name=ch, regular=c.get('series_regular'),
|
||||
show=show
|
||||
)
|
||||
)
|
||||
_ti_character = TVInfoCharacter(name=ch, regular=c.get('series_regular'),
|
||||
ti_show=ti_show, person=[result],
|
||||
episode_count=c.get('episode_count'))
|
||||
pc.append(_ti_character)
|
||||
ti_show.cast[(RoleTypes.ActorGuest, RoleTypes.ActorMain)[
|
||||
c.get('series_regular', False)]].append(_ti_character)
|
||||
result.characters = pc
|
||||
else:
|
||||
result = self._convert_person_obj(resp)
|
||||
|
@ -356,3 +373,268 @@ class TraktIndexer(TVInfoBase):
|
|||
log.debug('Could not connect to Trakt service: %s' % ex(e))
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _convert_episode(episode_data, show_obj, season_obj):
|
||||
# type: (Dict, TVInfoShow, TVInfoSeason) -> TVInfoEpisode
|
||||
ti_episode = TVInfoEpisode(show=show_obj)
|
||||
ti_episode.season = season_obj
|
||||
ti_episode.id, ti_episode.episodename, ti_episode.seasonnumber, ti_episode.episodenumber, \
|
||||
ti_episode.absolute_number, ti_episode.overview, ti_episode.firstaired, ti_episode.runtime, \
|
||||
ti_episode.rating, ti_episode.vote_count = episode_data.get('ids', {}).get('trakt'), \
|
||||
clean_data(episode_data.get('title')), episode_data.get('season'), episode_data.get('number'), \
|
||||
episode_data.get('number_abs'), enforce_type(clean_data(episode_data.get('overview')), str, ''), \
|
||||
re.sub('T.+$', '', episode_data.get('first_aired') or ''), \
|
||||
episode_data['runtime'], episode_data.get('rating'), episode_data.get('votes')
|
||||
if episode_data.get('available_translations'):
|
||||
ti_episode.language = clean_data(episode_data['available_translations'][0])
|
||||
ti_episode.ids = TVInfoIDs(ids={id_map[src]: _convert_imdb_id(id_map[src], sid)
|
||||
for src, sid in iteritems(episode_data['ids']) if src in id_map})
|
||||
return ti_episode
|
||||
|
||||
@staticmethod
|
||||
def _convert_show(show_data):
|
||||
# type: (Dict) -> TVInfoShow
|
||||
_s_d = (show_data, show_data.get('show'))['show' in show_data]
|
||||
ti_show = TVInfoShow()
|
||||
ti_show.seriesname, ti_show.id, ti_show.firstaired, ti_show.overview, ti_show.runtime, ti_show.network, \
|
||||
ti_show.network_country, ti_show.status, ti_show.genre_list, ti_show.language, ti_show.watcher_count, \
|
||||
ti_show.play_count, ti_show.collected_count, ti_show.collector_count, ti_show.vote_count, \
|
||||
ti_show.vote_average, ti_show.rating, ti_show.contentrating, ti_show.official_site, ti_show.slug = \
|
||||
clean_data(_s_d['title']), _s_d['ids']['trakt'], \
|
||||
re.sub('T.+$', '', _s_d.get('first_aired') or '') or _s_d.get('year'), \
|
||||
enforce_type(clean_data(_s_d.get('overview')), str, ''), _s_d.get('runtime'), _s_d.get('network'), \
|
||||
_s_d.get('country'), _s_d.get('status'), _s_d.get('genres', []), _s_d.get('language'), \
|
||||
show_data.get('watcher_count'), show_data.get('play_count'), show_data.get('collected_count'), \
|
||||
show_data.get('collector_count'), _s_d.get('votes'), _s_d.get('rating'), _s_d.get('rating'), \
|
||||
_s_d.get('certification'), _s_d.get('homepage'), _s_d['ids']['slug']
|
||||
ti_show.ids = TVInfoIDs(ids={id_map[src]: _convert_imdb_id(id_map[src], sid)
|
||||
for src, sid in iteritems(_s_d['ids']) if src in id_map})
|
||||
ti_show.genre = '|'.join(ti_show.genre_list or [])
|
||||
if _s_d.get('trailer'):
|
||||
ti_show.trailers = {'any': _s_d['trailer']}
|
||||
if 'episode' in show_data:
|
||||
ep_data = show_data['episode']
|
||||
ti_show.next_season_airdate = re.sub('T.+$', '', ep_data.get('first_aired') or '')
|
||||
ti_season = TVInfoSeason(show=ti_show)
|
||||
ti_season.number = ep_data['season']
|
||||
ti_season[ep_data['number']] = TraktIndexer._convert_episode(ep_data, ti_show, ti_season)
|
||||
ti_show[ep_data['season']] = ti_season
|
||||
return ti_show
|
||||
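# Shape sketch: calendar endpoints wrap the series under a 'show' key with the
# airing episode under 'episode', while plain endpoints return the fields at
# the top level; the ('show' in show_data) selector above handles both. For a
# calendar item the result also gains next_season_airdate plus a single
# populated season, e.g. ti_show[2][5] for an episode with season 2, number 5.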
|
||||
def _get_show_lists(self, url, account=None):
|
||||
# type: (str, Any) -> List[TVInfoShow]
|
||||
result = []
|
||||
if account:
|
||||
from sickgear import TRAKT_ACCOUNTS
|
||||
if account in TRAKT_ACCOUNTS and TRAKT_ACCOUNTS[account].active:
|
||||
kw = {'send_oauth': account}
|
||||
else:
|
||||
raise TraktAuthException('Account missing or disabled')
|
||||
else:
|
||||
kw = {}
|
||||
resp = TraktAPI().trakt_request(url, **kw)
|
||||
if resp:
|
||||
for _show in resp:
|
||||
result.append(self._convert_show(_show))
|
||||
return result
|
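# Flow sketch: every list getter below funnels through _get_show_lists, which
# only attaches OAuth when an active account is supplied. 'main' is a
# hypothetical account name in TRAKT_ACCOUNTS:
#
#   public = self._get_show_lists('shows/trending?extended=full&page=1&limit=10')
#   hidden = self._get_show_lists('users/hidden/recommendations?type=show',
#                                 account='main')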
||||
|
||||
def get_most_played(self, result_count=100, period='weekly', **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get most played shows
|
||||
:param period: possible values: 'daily', 'weekly', 'monthly', 'yearly', 'all'
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
use_period = ('weekly', period)[period in ('daily', 'weekly', 'monthly', 'yearly', 'all')]
|
||||
return self._get_show_lists('shows/played/%s?extended=full&page=%d&limit=%d' % (use_period, 1, result_count))
|
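# The ('weekly', period)[condition] construct above is a guard idiom: the
# boolean indexes the tuple, so any unrecognised period silently falls back
# to 'weekly'. A more conventional equivalent:
#
#   use_period = period if period in ('daily', 'weekly', 'monthly', 'yearly', 'all') else 'weekly'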
||||
|
||||
def get_most_watched(self, result_count=100, period='weekly', **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get most watched shows
|
||||
:param period: possible values: 'daily', 'weekly', 'monthly', 'yearly', 'all'
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
use_period = ('weekly', period)[period in ('daily', 'weekly', 'monthly', 'yearly', 'all')]
|
||||
return self._get_show_lists('shows/watched/%s?extended=full&page=%d&limit=%d' % (use_period, 1, result_count))
|
||||
|
||||
def get_most_collected(self, result_count=100, period='weekly', **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get most collected shows
|
||||
:param period: possible values: 'daily', 'weekly', 'monthly', 'yearly', 'all'
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
use_period = ('weekly', period)[period in ('daily', 'weekly', 'monthly', 'yearly', 'all')]
|
||||
return self._get_show_lists('shows/collected/%s?extended=full&page=%d&limit=%d' % (use_period, 1, result_count))
|
||||
|
||||
def get_recommended(self, result_count=100, period='weekly', **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get most recommended shows
|
||||
:param period: possible values: 'daily', 'weekly', 'monthly', 'yearly', 'all'
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
use_period = ('weekly', period)[period in ('daily', 'weekly', 'monthly', 'yearly', 'all')]
|
||||
return self._get_show_lists('shows/recommended/%s?extended=full&page=%d&limit=%d' % (use_period, 1, result_count))
|
||||
|
||||
def get_recommended_for_account(self, account, result_count=100, ignore_collected=False, ignore_watchlisted=False,
|
||||
**kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get recommended shows for account
|
||||
:param account: account to get recommendations for
|
||||
:param result_count: how many results are supposed to be returned
|
||||
:param ignore_collected: exclude collected shows
|
||||
:param ignore_watchlisted: exclude watchlisted shows
|
||||
"""
|
||||
from sickgear import TRAKT_ACCOUNTS
|
||||
if not account or account not in TRAKT_ACCOUNTS or not TRAKT_ACCOUNTS[account].active:
|
||||
raise TraktAuthException('Account missing or disabled')
|
||||
extra_param = []
|
||||
if ignore_collected:
|
||||
extra_param.append('ignore_collected=true')
|
||||
if ignore_watchlisted:
|
||||
extra_param.append('ignore_watchlisted=true')
|
||||
return self._get_show_lists('recommendations/shows?extended=full&page=%d&limit=%d%s' %
|
||||
(1, result_count, ('', '&%s' % '&'.join(extra_param))[0 < len(extra_param)]),
|
||||
account=account)
|
||||
|
||||
def hide_recommended_for_account(self, account, show_ids, **kwargs):
|
||||
# type: (integer_types, List[integer_types], Any) -> List[integer_types]
|
||||
"""
|
||||
hide recommended shows for account
|
||||
:param account: account to hide recommendations for
|
||||
:param show_ids: list of show_ids to no longer recommend for account
|
||||
:return: list of added ids
|
||||
"""
|
||||
from sickgear import TRAKT_ACCOUNTS
|
||||
if not account or account not in TRAKT_ACCOUNTS or not TRAKT_ACCOUNTS[account].active:
|
||||
raise TraktAuthException('Account missing or disabled')
|
||||
if not isinstance(show_ids, list) or not show_ids or any(not isinstance(_i, int) for _i in show_ids):
|
||||
raise TraktException('list of show_ids (trakt id) required')
|
||||
resp = TraktAPI().trakt_request('users/hidden/recommendations', send_oauth=account,
|
||||
data={'shows': [{'ids': {'trakt': _i}} for _i in show_ids]})
|
||||
if resp and isinstance(resp, dict) and 'added' in resp and 'shows' in resp['added']:
|
||||
if len(show_ids) == resp['added']['shows']:
|
||||
return show_ids
|
||||
if 'not_found' in resp and 'shows' in resp['not_found']:
|
||||
not_found = [_i['ids']['trakt'] for _i in resp['not_found']['shows']]
|
||||
else:
|
||||
not_found = []
|
||||
return [_i for _i in show_ids if _i not in not_found]
|
||||
return []
|
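# Usage sketch (ids and account name are illustrative): on a full success the
# input list is returned unchanged; on a partial response only the ids absent
# from 'not_found' come back; a malformed response yields [].
#
#   hidden = self.hide_recommended_for_account('main', [1390, 60300])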
||||
|
||||
def unhide_recommended_for_account(self, account, show_ids, **kwargs):
|
||||
# type: (integer_types, List[integer_types], Any) -> List[integer_types]
|
||||
"""
|
||||
unhide recommended shows for account
|
||||
:param account: account to unhide recommendations for
|
||||
:param show_ids: list of show_ids to be included in possible recommendations for account
|
||||
:return: list of removed ids
|
||||
"""
|
||||
from sickgear import TRAKT_ACCOUNTS
|
||||
if not account or account not in TRAKT_ACCOUNTS or not TRAKT_ACCOUNTS[account].active:
|
||||
raise TraktAuthException('Account missing or disabled')
|
||||
if not isinstance(show_ids, list) or not show_ids or any(not isinstance(_i, int) for _i in show_ids):
|
||||
raise TraktException('list of show_ids (trakt id) required')
|
||||
resp = TraktAPI().trakt_request('users/hidden/recommendations/remove', send_oauth=account,
|
||||
data={'shows': [{'ids': {'trakt': _i}} for _i in show_ids]})
|
||||
if resp and isinstance(resp, dict) and 'deleted' in resp and 'shows' in resp['deleted']:
|
||||
if len(show_ids) == resp['deleted']['shows']:
|
||||
return show_ids
|
||||
if 'not_found' in resp and 'shows' in resp['not_found']:
|
||||
not_found = [_i['ids']['trakt'] for _i in resp['not_found']['shows']]
|
||||
else:
|
||||
not_found = []
|
||||
return [_i for _i in show_ids if _i not in not_found]
|
||||
return []
|
||||
|
||||
def list_hidden_recommended_for_account(self, account, **kwargs):
|
||||
# type: (integer_types, Any) -> List[TVInfoShow]
|
||||
"""
|
||||
list hidden recommended shows for account
|
||||
:param account: account to list hidden recommendations for
|
||||
:return: list of hidden shows
|
||||
"""
|
||||
from sickgear import TRAKT_ACCOUNTS
|
||||
if not account or account not in TRAKT_ACCOUNTS or not TRAKT_ACCOUNTS[account].active:
|
||||
raise TraktAuthException('Account missing or disabled')
|
||||
return self._get_show_lists('users/hidden/recommendations?type=show', account=account)
|
||||
|
||||
def get_watchlisted_for_account(self, account, result_count=100, sort='rank', **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get watchlisted shows for the account
|
||||
:param account: account to get the watchlist for
|
||||
:param result_count: how many results are supposed to be returned
|
||||
:param sort: possible values: 'rank', 'added', 'released', 'title'
|
||||
"""
|
||||
from sickgear import TRAKT_ACCOUNTS
|
||||
if not account or account not in TRAKT_ACCOUNTS or not TRAKT_ACCOUNTS[account].active:
|
||||
raise TraktAuthException('Account missing or disabled')
|
||||
sort = ('rank', sort)[sort in ('rank', 'added', 'released', 'title')]
|
||||
return self._get_show_lists('users/%s/watchlist/shows/%s?extended=full&page=%d&limit=%d' %
|
||||
(TRAKT_ACCOUNTS[account].slug, sort, 1, result_count), account=account)
|
||||
|
||||
def get_anticipated(self, result_count=100, **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get most anticipated shows
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
return self._get_show_lists('shows/anticipated?extended=full&page=%d&limit=%d' % (1, result_count))
|
||||
|
||||
def get_trending(self, result_count=100, **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get trending shows
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
return self._get_show_lists('shows/trending?extended=full&page=%d&limit=%d' % (1, result_count))
|
||||
|
||||
def get_popular(self, result_count=100, **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get all popular shows
|
||||
:param result_count: how many results are supposed to be returned
|
||||
"""
|
||||
return self._get_show_lists('shows/popular?extended=full&page=%d&limit=%d' % (1, result_count))
|
||||
|
||||
def get_similar(self, tvid, result_count=100, **kwargs):
|
||||
# type: (integer_types, int, Any) -> List[TVInfoShow]
|
||||
"""
|
||||
return list of shows similar to the given id
|
||||
:param tvid: id to find similar shows for
|
||||
:param result_count: count of results requested
|
||||
"""
|
||||
if not isinstance(tvid, int):
|
||||
raise TraktException('tvid/trakt id for show required')
|
||||
return self._get_show_lists('shows/%d/related?extended=full&page=%d&limit=%d' % (tvid, 1, result_count))
|
||||
|
||||
def get_new_shows(self, result_count=100, start_date=None, days=32, **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get new shows
|
||||
:param result_count: how many results are supposed to be returned
|
||||
:param start_date: start date for returned data in format: '2014-09-01'
|
||||
:param days: number of days to return from start date
|
||||
"""
|
||||
if None is start_date:
|
||||
start_date = (datetime.datetime.now() + datetime.timedelta(days=-16)).strftime('%Y-%m-%d')
|
||||
return self._get_show_lists('calendars/all/shows/new/%s/%s?extended=full&page=%d&limit=%d' %
|
||||
(start_date, days, 1, result_count))
|
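# Note the default window: with start_date=None the calendar query begins 16
# days in the past and spans `days` (32 by default) forward, i.e. roughly two
# weeks back and two weeks ahead. Explicit call for a fixed fortnight:
#
#   premieres = self.get_new_shows(result_count=50, start_date='2023-04-01', days=14)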
||||
|
||||
def get_new_seasons(self, result_count=100, start_date=None, days=32, **kwargs):
|
||||
# type: (...) -> List[TVInfoShow]
|
||||
"""
|
||||
get new seasons
|
||||
:param result_count: how many results are supposed to be returned
|
||||
:param start_date: start date for returned data in format: '2014-09-01'
|
||||
:param days: number of days to return from start date
|
||||
"""
|
||||
if None is start_date:
|
||||
start_date = (datetime.datetime.now() + datetime.timedelta(days=-16)).strftime('%Y-%m-%d')
|
||||
return self._get_show_lists('calendars/all/shows/premieres/%s/%s?extended=full&page=%d&limit=%d' %
|
||||
(start_date, days, 1, result_count))
|
||||
|
|
|
@ -33,7 +33,7 @@ from lib.cachecontrol import CacheControl, caches
|
|||
from lib.dateutil.parser import parse
|
||||
from lib.exceptions_helper import ConnectionSkipException
|
||||
from lib.tvinfo_base import CastList, TVInfoCharacter, CrewList, TVInfoPerson, RoleTypes, \
|
||||
TVINFO_TVDB, TVINFO_TVDB_SLUG, TVInfoBase, TVInfoIDs
|
||||
TVINFO_TVDB, TVINFO_TVDB_SLUG, TVInfoBase, TVInfoIDs, TVInfoNetwork, TVInfoShow
|
||||
|
||||
from .tvdb_exceptions import TvdbError, TvdbShownotfound, TvdbTokenexpired
|
||||
from .tvdb_ui import BaseUI, ConsoleUI
|
||||
|
@ -44,7 +44,6 @@ from six import integer_types, iteritems, PY2, string_types
|
|||
if False:
|
||||
# noinspection PyUnresolvedReferences
|
||||
from typing import Any, AnyStr, Dict, List, Optional, Union
|
||||
from lib.tvinfo_base import TVInfoShow
|
||||
|
||||
|
||||
THETVDB_V2_API_TOKEN = {'token': None, 'datetime': datetime.datetime.fromordinal(1)}
|
||||
|
@ -52,7 +51,7 @@ log = logging.getLogger('tvdb.api')
|
|||
log.addHandler(logging.NullHandler())
|
||||
|
||||
|
||||
# noinspection PyUnusedLocal
|
||||
# noinspection HttpUrlsUsage,PyUnusedLocal
|
||||
def _record_hook(r, *args, **kwargs):
|
||||
r.hook_called = True
|
||||
if 301 == r.status_code and isinstance(r.headers.get('Location'), string_types) \
|
||||
|
@ -64,8 +63,8 @@ def _record_hook(r, *args, **kwargs):
|
|||
def retry(exception_to_check, tries=4, delay=3, backoff=2):
|
||||
"""Retry calling the decorated function using an exponential backoff.
|
||||
|
||||
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
|
||||
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
|
||||
www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
|
||||
original from: wiki.python.org/moin/PythonDecoratorLibrary#Retry
|
||||
|
||||
:param exception_to_check: the exception to check. may be a tuple of
|
||||
exceptions to check
|
||||
|
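# Usage sketch for the retry decorator (parameters illustrative): each failed
# attempt multiplies the delay by `backoff` before the next try.
#
#   @retry(TvdbError, tries=3, delay=2, backoff=2)
#   def fetch_series():
#       ...  # waits 2s after the first failure, 4s after the second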
@ -222,7 +221,7 @@ class Tvdb(TVInfoBase):
|
|||
tvdb_api's own key (fine for small scripts), but you can use your
|
||||
own key if desired - this is recommended if you are embedding
|
||||
tvdb_api in a larger application)
|
||||
See http://thetvdb.com/?tab=apiregister to get your own key
|
||||
See thetvdb.com/?tab=apiregister to get your own key
|
||||
|
||||
"""
|
||||
|
||||
|
@ -334,13 +333,15 @@ class Tvdb(TVInfoBase):
|
|||
|
||||
def _search_show(self, name=None, ids=None, **kwargs):
|
||||
# type: (AnyStr, Dict[integer_types, integer_types], Optional[Any]) -> List[TVInfoShow]
|
||||
def map_data(data):
|
||||
if not data.get('poster'):
|
||||
data['poster'] = data.get('image')
|
||||
data['ids'] = TVInfoIDs(
|
||||
tvdb=data.get('id'),
|
||||
imdb=data.get('imdb_id') and try_int(data.get('imdb_id', '').replace('tt', ''), None))
|
||||
return data
|
||||
def make_tvinfoshow(data):
|
||||
_ti_show = TVInfoShow()
|
||||
_ti_show.id, _ti_show.banner, _ti_show.firstaired, _ti_show.poster, _ti_show.network, _ti_show.overview, \
|
||||
_ti_show.seriesname, _ti_show.slug, _ti_show.status, _ti_show.aliases, _ti_show.ids = \
|
||||
clean_data(data['id']), clean_data(data.get('banner')), clean_data(data.get('firstaired')), \
|
||||
clean_data(data.get('poster')), clean_data(data.get('network')), clean_data(data.get('overview')), \
|
||||
clean_data(data.get('seriesname')), clean_data(data.get('slug')), clean_data(data.get('status')), \
|
||||
clean_data((data.get('aliases'))), TVInfoIDs(tvdb=try_int(clean_data(data['id'])))
|
||||
return _ti_show
|
||||
|
||||
results = []
|
||||
if ids:
|
||||
|
@ -356,7 +357,7 @@ class Tvdb(TVInfoBase):
|
|||
else:
|
||||
d_m = shows
|
||||
if d_m:
|
||||
results = list(map(map_data, [d_m['data']]))
|
||||
results.append(make_tvinfoshow(d_m['data']))
|
||||
if ids.get(TVINFO_TVDB_SLUG):
|
||||
cache_id_key = 's-id-%s-%s' % (TVINFO_TVDB, ids[TVINFO_TVDB_SLUG])
|
||||
is_none, shows = self._get_cache_entry(cache_id_key)
|
||||
|
@ -371,7 +372,7 @@ class Tvdb(TVInfoBase):
|
|||
if d_m:
|
||||
for r in d_m:
|
||||
if ids.get(TVINFO_TVDB_SLUG) == r['slug']:
|
||||
results = list(map(map_data, [r]))
|
||||
results.append(make_tvinfoshow(r))
|
||||
break
|
||||
if name:
|
||||
for n in ([name], name)[isinstance(name, list)]:
|
||||
|
@ -388,7 +389,7 @@ class Tvdb(TVInfoBase):
|
|||
if r:
|
||||
if not isinstance(r, list):
|
||||
r = [r]
|
||||
results.extend(list(map(map_data, r)))
|
||||
results.extend([make_tvinfoshow(_s) for _s in r])
|
||||
|
||||
seen = set()
|
||||
results = [seen.add(r['id']) or r for r in results if r['id'] not in seen]
|
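# Dedup idiom above: set.add returns None, so `seen.add(r['id']) or r` records
# the id as a side effect and still yields r, dropping later duplicates. A
# more explicit equivalent:
#
#   unique = []
#   for r in results:
#       if r['id'] not in seen:
#           seen.add(r['id'])
#           unique.append(r)
#   results = unique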
||||
|
@ -947,9 +948,6 @@ class Tvdb(TVInfoBase):
|
|||
role_image = self._make_image(self.config['url_artworks'], role_image)
|
||||
character_name = n.get('role', '').strip() or alts.get(n['id'], {}).get('role', '')
|
||||
person_name = n.get('name', '').strip() or alts.get(n['id'], {}).get('name', '')
|
||||
try:
|
||||
person_id = try_int(re.search(r'^person/(\d+)/', n.get('image', '')).group(1), None)
|
||||
except (BaseException, Exception):
|
||||
person_id = None
|
||||
person_id = person_id or alts.get(n['id'], {}).get('person_id')
|
||||
character_id = n.get('id', None) or alts.get(n['id'], {}).get('rid')
|
||||
|
@ -971,12 +969,12 @@ class Tvdb(TVInfoBase):
|
|||
cast[RoleTypes.ActorMain].append(
|
||||
TVInfoCharacter(
|
||||
p_id=character_id, name=character_name, person=[TVInfoPerson(p_id=person_id, name=person_name)],
|
||||
image=role_image, show=self.shows[sid]))
|
||||
image=role_image, show=self.ti_shows[sid]))
|
||||
except (BaseException, Exception):
|
||||
pass
|
||||
self._set_show_data(sid, 'actors', a)
|
||||
self._set_show_data(sid, 'cast', cast)
|
||||
self.shows[sid].actors_loaded = True
|
||||
self.ti_shows[sid].actors_loaded = True
|
||||
|
||||
def get_episode_data(self, epid):
|
||||
# Parse episode information
|
||||
|
@ -1004,7 +1002,7 @@ class Tvdb(TVInfoBase):
|
|||
mapped_img_types = {'banner': 'series'}
|
||||
excluded_main_data = enabled_type in ['seasons_enabled', 'seasonwides_enabled']
|
||||
loaded_name = '%s_loaded' % image_type
|
||||
if (type_bool or self.config[enabled_type]) and not getattr(self.shows.get(sid), loaded_name, False):
|
||||
if (type_bool or self.config[enabled_type]) and not getattr(self.ti_shows.get(sid), loaded_name, False):
|
||||
image_data = self._getetsrc(self.config['url_series_images'] %
|
||||
(sid, mapped_img_types.get(image_type, image_type)), language=language)
|
||||
if image_data and 0 < len(image_data.get('data', '') or ''):
|
||||
|
@ -1017,12 +1015,12 @@ class Tvdb(TVInfoBase):
|
|||
self._set_show_data(sid, f'{image_type}_thumb', url_thumb)
|
||||
excluded_main_data = True # artwork found so prevent fallback
|
||||
self._parse_banners(sid, image_data['data'])
|
||||
self.shows[sid].__dict__[loaded_name] = True
|
||||
self.ti_shows[sid].__dict__[loaded_name] = True
|
||||
|
||||
# fallback to the main data image thumbnail for non excluded_main_data if artwork is not found
|
||||
if not excluded_main_data and show_data['data'].get(image_type):
|
||||
if not excluded_main_data and show_data.get(image_type):
|
||||
self._set_show_data(sid, f'{image_type}_thumb',
|
||||
re.sub(r'\.jpg$', '_t.jpg', show_data['data'][image_type], flags=re.I))
|
||||
re.sub(r'\.jpg$', '_t.jpg', show_data[image_type], flags=re.I))
|
||||
|
||||
def _get_show_data(self,
|
||||
sid, # type: integer_types
|
||||
|
@ -1044,7 +1042,8 @@ class Tvdb(TVInfoBase):
|
|||
|
||||
# Parse show information
|
||||
url = self.config['url_series_info'] % sid
|
||||
if direct_data or sid not in self.shows or None is self.shows[sid].id or language != self.shows[sid].language:
|
||||
if direct_data or sid not in self.ti_shows or None is self.ti_shows[sid].id or \
|
||||
language != self.ti_shows[sid].language:
|
||||
log.debug('Getting all series data for %s' % sid)
|
||||
show_data = self._getetsrc(url, language=language)
|
||||
if not show_data or not show_data.get('data'):
|
||||
|
@ -1056,13 +1055,34 @@ class Tvdb(TVInfoBase):
|
|||
if not (show_data and 'seriesname' in show_data.get('data', {}) or {}):
|
||||
return False
|
||||
|
||||
for k, v in iteritems(show_data['data']):
|
||||
self._set_show_data(sid, k, v)
|
||||
self._set_show_data(sid, 'ids',
|
||||
TVInfoIDs(
|
||||
tvdb=show_data['data'].get('id'),
|
||||
imdb=show_data['data'].get('imdb_id')
|
||||
and try_int(show_data['data'].get('imdb_id', '').replace('tt', ''), None)))
|
||||
show_data = show_data['data']
|
||||
ti_show = self.ti_shows[sid] # type: TVInfoShow
|
||||
ti_show.banner_loaded = ti_show.poster_loaded = ti_show.fanart_loaded = True
|
||||
ti_show.id = show_data['id']
|
||||
ti_show.seriesname = clean_data(show_data.get('seriesname'))
|
||||
ti_show.slug = clean_data(show_data.get('slug'))
|
||||
ti_show.poster = clean_data(show_data.get('poster'))
|
||||
ti_show.banner = clean_data(show_data.get('banner'))
|
||||
ti_show.fanart = clean_data(show_data.get('fanart'))
|
||||
ti_show.firstaired = clean_data(show_data.get('firstAired'))
|
||||
ti_show.rating = show_data.get('rating')
|
||||
ti_show.contentrating = clean_data(show_data.get('contentRatings'))
|
||||
ti_show.aliases = show_data.get('aliases') or []
|
||||
ti_show.status = clean_data(show_data['status'])
|
||||
if clean_data(show_data.get('network')):
|
||||
ti_show.network = clean_data(show_data['network'])
|
||||
ti_show.networks = [TVInfoNetwork(clean_data(show_data['network']),
|
||||
n_id=clean_data(show_data.get('networkid')))]
|
||||
ti_show.runtime = try_int(show_data.get('runtime'), 0)
|
||||
ti_show.language = clean_data(show_data.get('language'))
|
||||
ti_show.genre = clean_data(show_data.get('genre'))
|
||||
ti_show.genre_list = clean_data(show_data.get('genre_list')) or []
|
||||
ti_show.overview = clean_data(show_data.get('overview'))
|
||||
ti_show.imdb_id = clean_data(show_data.get('imdb_id')) or None
|
||||
ti_show.airs_time = clean_data(show_data.get('airs_time'))
|
||||
ti_show.airs_dayofweek = clean_data(show_data.get('airs_dayofweek'))
|
||||
ti_show.ids = TVInfoIDs(tvdb=ti_show.id, imdb=ti_show.imdb_id and try_int(ti_show.imdb_id.replace('tt', ''), None))
|
||||
|
||||
else:
|
||||
show_data = {'data': {}}
|
||||
|
||||
|
@ -1073,13 +1093,13 @@ class Tvdb(TVInfoBase):
|
|||
('seasonwide', 'seasonwides_enabled', seasonwides)]:
|
||||
self._parse_images(sid, language, show_data, img_type, en_type, p_type)
|
||||
|
||||
if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False):
|
||||
if (actors or self.config['actors_enabled']) and not getattr(self.ti_shows.get(sid), 'actors_loaded', False):
|
||||
actor_data = self._getetsrc(self.config['url_actors_info'] % sid, language=language)
|
||||
actor_data_alt = self._getetsrc(self.config['url_series_people'] % sid, language=language)
|
||||
if actor_data and 0 < len(actor_data.get('data', '') or '') or actor_data_alt and actor_data_alt['data']:
|
||||
self._parse_actors(sid, actor_data and actor_data.get('data', ''), actor_data_alt and actor_data_alt['data'])
|
||||
|
||||
if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False):
|
||||
if get_ep_info and not getattr(self.ti_shows.get(sid), 'ep_loaded', False):
|
||||
# Parse episode data
|
||||
log.debug('Getting all episodes of %s' % sid)
|
||||
|
||||
|
@ -1200,7 +1220,7 @@ class Tvdb(TVInfoBase):
|
|||
ep_no = int(float(elem_epno))
|
||||
|
||||
if not cur_ep.get('network'):
|
||||
cur_ep['network'] = self.shows[sid].network
|
||||
cur_ep['network'] = self.ti_shows[sid].network
|
||||
for k, v in iteritems(cur_ep):
|
||||
k = k.lower()
|
||||
|
||||
|
@ -1225,7 +1245,7 @@ class Tvdb(TVInfoBase):
|
|||
try:
|
||||
for guest in cur_ep.get('gueststars_list', []):
|
||||
cast[RoleTypes.ActorGuest].append(TVInfoCharacter(person=[TVInfoPerson(name=guest)],
|
||||
show=self.shows[sid]))
|
||||
show=self.ti_shows[sid]))
|
||||
except (BaseException, Exception):
|
||||
pass
|
||||
try:
|
||||
|
@ -1236,7 +1256,7 @@ class Tvdb(TVInfoBase):
|
|||
self._set_item(sid, seas_no, ep_no, 'crew', crew)
|
||||
self._set_item(sid, seas_no, ep_no, 'cast', cast)
|
||||
|
||||
self.shows[sid].ep_loaded = True
|
||||
self.ti_shows[sid].ep_loaded = True
|
||||
|
||||
return True
|
||||
|
||||
|
@ -1258,6 +1278,11 @@ class Tvdb(TVInfoBase):
|
|||
self.corrections.update(dict([(x['seriesname'], int(x['id'])) for x in selected_series]))
|
||||
return sids
|
||||
|
||||
def _get_languages(self):
|
||||
if not Tvdb._supported_languages:
|
||||
Tvdb._supported_languages = [{'id': _l, 'name': None, 'nativeName': None, 'sg_lang': _l}
|
||||
for _l in self.config['valid_languages']]
|
||||
|
||||
|
||||
def main():
|
||||
"""Simple example of using tvdb_api - it just
|
||||
|
|
|
@ -11,7 +11,7 @@ __author__ = 'dbr/Ben'
|
|||
__version__ = '1.9'
|
||||
|
||||
__all__ = ['TvdbException', 'TvdbError', 'TvdbUserabort', 'TvdbShownotfound',
|
||||
'TvdbSeasonnotfound', 'TvdbEpisodenotfound', 'TvdbAttributenotfound', 'TvdbTokenexpired']
|
||||
'TvdbSeasonnotfound', 'TvdbEpisodenotfound', 'TvdbAttributenotfound', 'TvdbTokenexpired', 'TvdbTokenFailure']
|
||||
|
||||
from lib.tvinfo_base.exceptions import *
|
||||
|
||||
|
@ -64,3 +64,9 @@ class TvdbTokenexpired(BaseTVinfoAuthenticationerror, TvdbError):
|
|||
"""token expired or missing thetvdb.com
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class TvdbTokenFailure(BaseTVinfoAuthenticationerror, TvdbError):
|
||||
"""getting token failed
|
||||
"""
|
||||
pass
|
||||
|
|
|
@ -16,7 +16,7 @@ from requests.adapters import HTTPAdapter
|
|||
from tornado._locale_data import LOCALE_NAMES
|
||||
from urllib3.util.retry import Retry
|
||||
|
||||
from sg_helpers import clean_data, get_url, try_int
|
||||
from sg_helpers import clean_data, enforce_type, get_url, try_int
|
||||
from lib.dateutil.parser import parser
|
||||
# noinspection PyProtectedMember
|
||||
from lib.dateutil.tz.tz import _datetime_to_timestamp
|
||||
|
@ -103,7 +103,7 @@ show_map = {
|
|||
# 'siteratingcount': '',
|
||||
# 'lastupdated': '',
|
||||
# 'contentrating': '',
|
||||
'rating': 'rating',
|
||||
# 'rating': 'rating',
|
||||
'status': 'status',
|
||||
'overview': 'summary',
|
||||
# 'poster': 'image',
|
||||
|
@ -152,21 +152,28 @@ class TvMaze(TVInfoBase):
|
|||
if language in cur_locale[1]['name_en'].lower():
|
||||
language_country_code = cur_locale[0].split('_')[1].lower()
|
||||
break
|
||||
return {'seriesname': clean_data(s.name), 'id': s.id, 'firstaired': clean_data(s.premiered),
|
||||
'network': clean_data((s.network and s.network.name) or (s.web_channel and s.web_channel.name)),
|
||||
'genres': clean_data(isinstance(s.genres, list) and '|'.join(g.lower() for g in s.genres) or
|
||||
s.genres),
|
||||
'overview': clean_data(s.summary), 'language': clean_data(s.language),
|
||||
'language_country_code': clean_data(language_country_code),
|
||||
'runtime': s.average_runtime or s.runtime,
|
||||
'type': clean_data(s.type), 'schedule': s.schedule, 'status': clean_data(s.status),
|
||||
'official_site': clean_data(s.official_site),
|
||||
'aliases': [clean_data(a.name) for a in s.akas], 'image': s.image and s.image.get('original'),
|
||||
'poster': s.image and s.image.get('original'),
|
||||
'ids': TVInfoIDs(
|
||||
tvdb=s.externals.get('thetvdb'), rage=s.externals.get('tvrage'), tvmaze=s.id,
|
||||
imdb=clean_data(s.externals.get('imdb') and try_int(s.externals.get('imdb').replace('tt', ''),
|
||||
None)))}
|
||||
ti_show = TVInfoShow()
|
||||
show_type = clean_data(s.type)
|
||||
if show_type:
|
||||
show_type = [show_type]
|
||||
else:
|
||||
show_type = []
|
||||
ti_show.seriesname, ti_show.id, ti_show.firstaired, ti_show.network, ti_show.genre_list, ti_show.overview, \
|
||||
ti_show.language, ti_show.runtime, ti_show.show_type, ti_show.airs_dayofweek, ti_show.status, \
|
||||
ti_show.official_site, ti_show.aliases, ti_show.poster, ti_show.ids = clean_data(s.name), s.id, \
|
||||
clean_data(s.premiered), \
|
||||
clean_data((s.network and s.network.name) or (s.web_channel and s.web_channel.name)), \
|
||||
isinstance(s.genres, list) and [clean_data(g.lower()) for g in s.genres], \
|
||||
enforce_type(clean_data(s.summary), str, ''), clean_data(s.language), \
|
||||
s.average_runtime or s.runtime, show_type, ', '.join(s.schedule['days'] or []), clean_data(s.status), \
|
||||
clean_data(s.official_site), [clean_data(a.name) for a in s.akas], \
|
||||
s.image and s.image.get('original'), \
|
||||
TVInfoIDs(tvdb=s.externals.get('thetvdb'), rage=s.externals.get('tvrage'), tvmaze=s.id,
|
||||
imdb=clean_data(s.externals.get('imdb') and
|
||||
try_int(s.externals.get('imdb').replace('tt', ''), None)))
|
||||
ti_show.genre = '|'.join(ti_show.genre_list or [])
|
||||
return ti_show
|
||||
|
||||
results = []
|
||||
if ids:
|
||||
for t, p in iteritems(ids):
|
||||
|
@ -230,18 +237,24 @@ class TvMaze(TVInfoBase):
|
|||
('episodename', 'title'), ('overview', 'summary'), ('firstaired', 'airdate'),
|
||||
('airtime', 'airtime'), ('runtime', 'runtime'),
|
||||
('seriesid', 'maze_id'), ('id', 'maze_id'), ('is_special', 'special'), ('filename', 'image')):
|
||||
if 'filename' == _k:
|
||||
if 'airtime' == _k:
|
||||
try:
|
||||
airtime = datetime.time.fromisoformat(clean_data(getattr(ep_obj, _s, getattr(empty_ep, _k))))
|
||||
except (BaseException, Exception):
|
||||
airtime = None
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number or 0, _k, airtime)
|
||||
elif 'filename' == _k:
|
||||
image = getattr(ep_obj, _s, {}) or {}
|
||||
image = image.get('original') or image.get('medium')
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, _k, image)
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number or 0, _k, image)
|
||||
else:
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, _k,
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number or 0, _k,
|
||||
clean_data(getattr(ep_obj, _s, getattr(empty_ep, _k))))
|
||||
|
||||
if ep_obj.airstamp:
|
||||
try:
|
||||
at = _datetime_to_timestamp(tz_p.parse(ep_obj.airstamp))
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number, 'timestamp', at)
|
||||
self._set_item(sid, ep_obj.season_number, ep_obj.episode_number or 0, 'timestamp', at)
|
||||
except (BaseException, Exception):
|
||||
pass
|
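# datetime.time.fromisoformat accepts 'HH:MM[:SS]' strings, so a TVmaze
# airtime such as '21:00' parses cleanly, while '' or None raises and the
# except branch above stores airtime as None:
#
#   datetime.time.fromisoformat('21:00')  # -> datetime.time(21, 0)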
||||
|
||||
|
@ -317,140 +330,15 @@ class TvMaze(TVInfoBase):
|
|||
if not show_data:
|
||||
return False
|
||||
|
||||
ti_show = self.shows[sid] # type: TVInfoShow
|
||||
show_obj = ti_show.__dict__
|
||||
for k, v in iteritems(show_obj):
|
||||
if k not in ('cast', 'crew', 'images', 'aliases'):
|
||||
show_obj[k] = getattr(show_data, show_map.get(k, k), clean_data(show_obj[k]))
|
||||
ti_show.aliases = [clean_data(a.name) for a in show_data.akas]
|
||||
ti_show.runtime = show_data.average_runtime or show_data.runtime
|
||||
p_set = False
|
||||
if show_data.image:
|
||||
p_set = True
|
||||
ti_show.poster = show_data.image.get('original')
|
||||
ti_show.poster_thumb = show_data.image.get('medium')
|
||||
|
||||
if (banners or posters or fanart or
|
||||
any(self.config.get('%s_enabled' % t, False) for t in ('banners', 'posters', 'fanart'))) and \
|
||||
not all(getattr(ti_show, '%s_loaded' % t, False) for t in ('poster', 'banner', 'fanart')):
|
||||
if show_data.images:
|
||||
ti_show.poster_loaded = True
|
||||
ti_show.banner_loaded = True
|
||||
ti_show.fanart_loaded = True
|
||||
self._set_images(ti_show, show_data, p_set)
|
||||
|
||||
if show_data.schedule:
|
||||
if 'time' in show_data.schedule:
|
||||
ti_show.airs_time = show_data.schedule['time']
|
||||
try:
|
||||
h, m = show_data.schedule['time'].split(':')
|
||||
h, m = try_int(h, None), try_int(m, None)
|
||||
if None is not h and None is not m:
|
||||
ti_show.time = datetime.time(hour=h, minute=m)
|
||||
except (BaseException, Exception):
|
||||
pass
|
||||
if 'days' in show_data.schedule:
|
||||
ti_show.airs_dayofweek = ', '.join(show_data.schedule['days'])
|
||||
if show_data.genres:
|
||||
ti_show.genre = '|'.join(show_data.genres).lower()
|
||||
|
||||
if (actors or self.config['actors_enabled']) and not getattr(self.shows.get(sid), 'actors_loaded', False):
|
||||
if show_data.cast:
|
||||
character_person_ids = {}
|
||||
for cur_ch in ti_show.cast[RoleTypes.ActorMain]:
|
||||
character_person_ids.setdefault(cur_ch.id, []).extend([p.id for p in cur_ch.person])
|
||||
for cur_ch in show_data.cast.characters:
|
||||
existing_character = next((c for c in ti_show.cast[RoleTypes.ActorMain] if c.id == cur_ch.id),
|
||||
None) # type: Optional[TVInfoCharacter]
|
||||
person = self._convert_person(cur_ch.person)
|
||||
if existing_character:
|
||||
existing_person = next((p for p in existing_character.person
|
||||
if person.id == p.ids.get(TVINFO_TVMAZE)),
|
||||
None) # type: TVInfoPerson
|
||||
if existing_person:
|
||||
try:
|
||||
character_person_ids[cur_ch.id].remove(existing_person.id)
|
||||
except (BaseException, Exception):
|
||||
print('error')
|
||||
pass
|
||||
(existing_person.p_id, existing_person.name, existing_person.image, existing_person.gender,
|
||||
existing_person.birthdate, existing_person.deathdate, existing_person.country,
|
||||
existing_person.country_code, existing_person.country_timezone, existing_person.thumb_url,
|
||||
existing_person.url, existing_person.ids) = \
|
||||
(cur_ch.person.id, clean_data(cur_ch.person.name),
|
||||
cur_ch.person.image and cur_ch.person.image.get('original'),
|
||||
PersonGenders.named.get(
|
||||
cur_ch.person.gender and cur_ch.person.gender.lower(), PersonGenders.unknown),
|
||||
person.birthdate, person.deathdate,
|
||||
cur_ch.person.country and clean_data(cur_ch.person.country.get('name')),
|
||||
cur_ch.person.country and clean_data(cur_ch.person.country.get('code')),
|
||||
cur_ch.person.country and clean_data(cur_ch.person.country.get('timezone')),
|
||||
cur_ch.person.image and cur_ch.person.image.get('medium'),
|
||||
cur_ch.person.url, {TVINFO_TVMAZE: cur_ch.person.id})
|
||||
else:
|
||||
existing_character.person.append(person)
|
||||
else:
|
||||
ti_show.cast[RoleTypes.ActorMain].append(
|
||||
TVInfoCharacter(image=cur_ch.image and cur_ch.image.get('original'), name=clean_data(cur_ch.name),
|
||||
p_id=cur_ch.id, person=[person], plays_self=cur_ch.plays_self,
|
||||
thumb_url=cur_ch.image and cur_ch.image.get('medium')
|
||||
))
|
||||
|
||||
if character_person_ids:
|
||||
for cur_ch, cur_p_ids in iteritems(character_person_ids):
|
||||
if cur_p_ids:
|
||||
char = next((mc for mc in ti_show.cast[RoleTypes.ActorMain] if mc.id == cur_ch),
|
||||
None) # type: Optional[TVInfoCharacter]
|
||||
if char:
|
||||
char.person = [p for p in char.person if p.id not in cur_p_ids]
|
||||
|
||||
if show_data.cast:
|
||||
ti_show.actors = [
|
||||
{'character': {'id': ch.id,
|
||||
'name': clean_data(ch.name),
|
||||
'url': 'https://www.tvmaze.com/character/view?id=%s' % ch.id,
|
||||
'image': ch.image and ch.image.get('original'),
|
||||
},
|
||||
'person': {'id': ch.person and ch.person.id,
|
||||
'name': ch.person and clean_data(ch.person.name),
|
||||
'url': ch.person and 'https://www.tvmaze.com/person/view?id=%s' % ch.person.id,
|
||||
'image': ch.person and ch.person.image and ch.person.image.get('original'),
|
||||
'birthday': None, # not sure about format
|
||||
'deathday': None, # not sure about format
|
||||
'gender': ch.person and ch.person.gender and ch.person.gender,
|
||||
'country': ch.person and ch.person.country and
|
||||
clean_data(ch.person.country.get('name')),
|
||||
},
|
||||
} for ch in show_data.cast.characters]
|
||||
|
||||
if show_data.crew:
|
||||
for cur_cw in show_data.crew:
|
||||
rt = crew_type_names.get(cur_cw.type.lower(), RoleTypes.CrewOther)
|
||||
ti_show.crew[rt].append(
|
||||
Crew(p_id=cur_cw.person.id, name=clean_data(cur_cw.person.name),
|
||||
image=cur_cw.person.image and cur_cw.person.image.get('original'),
|
||||
gender=cur_cw.person.gender,
|
||||
birthdate=cur_cw.person.birthday, deathdate=cur_cw.person.death_day,
|
||||
country=cur_cw.person.country and cur_cw.person.country.get('name'),
|
||||
country_code=cur_cw.person.country and clean_data(cur_cw.person.country.get('code')),
|
||||
country_timezone=cur_cw.person.country
|
||||
and clean_data(cur_cw.person.country.get('timezone')),
|
||||
crew_type_name=cur_cw.type,
|
||||
)
|
||||
ti_show = self.ti_shows[sid] # type: TVInfoShow
|
||||
self._show_info_loader(
|
||||
sid, show_data, ti_show,
|
||||
load_images=banners or posters or fanart or
|
||||
any(self.config.get('%s_enabled' % t, False) for t in ('banners', 'posters', 'fanart')),
|
||||
load_actors=(actors or self.config['actors_enabled'])
|
||||
)
|
||||
|
||||
if show_data.externals:
|
||||
ti_show.ids = TVInfoIDs(tvdb=show_data.externals.get('thetvdb'),
|
||||
rage=show_data.externals.get('tvrage'),
|
||||
imdb=clean_data(show_data.externals.get('imdb') and
|
||||
try_int(show_data.externals.get('imdb').replace('tt', ''), None)))
|
||||
|
||||
if show_data.network:
|
||||
self._set_network(ti_show, show_data.network, False)
|
||||
elif show_data.web_channel:
|
||||
self._set_network(ti_show, show_data.web_channel, True)
|
||||
|
||||
if get_ep_info and not getattr(self.shows.get(sid), 'ep_loaded', False):
|
||||
if get_ep_info and not getattr(self.ti_shows.get(sid), 'ep_loaded', False):
|
||||
log.debug('Getting all episodes of %s' % sid)
|
||||
if None is show_data:
|
||||
show_data = self._get_tvm_show(sid, get_ep_info)
|
||||
|
@ -509,47 +397,262 @@ class TvMaze(TVInfoBase):
|
|||
# type: (...) -> Dict[integer_types, integer_types]
|
||||
return {sid: v.seconds_since_epoch for sid, v in iteritems(tvmaze.show_updates().updates)}
|
||||
|
||||
@staticmethod
|
||||
def _convert_person(person_obj):
|
||||
def _convert_person(self, tvmaze_person_obj):
|
||||
# type: (tvmaze.Person) -> TVInfoPerson
|
||||
ch = []
|
||||
for c in person_obj.castcredits or []:
|
||||
show = TVInfoShow()
|
||||
show.seriesname = clean_data(c.show.name)
|
||||
show.id = c.show.id
|
||||
show.firstaired = clean_data(c.show.premiered)
|
||||
show.ids = TVInfoIDs(ids={TVINFO_TVMAZE: show.id})
|
||||
show.overview = clean_data(c.show.summary)
|
||||
show.status = clean_data(c.show.status)
|
||||
_dupes = []
|
||||
for c in tvmaze_person_obj.castcredits or []:
|
||||
ti_show = TVInfoShow()
|
||||
ti_show.seriesname = clean_data(c.show.name)
|
||||
ti_show.id = c.show.id
|
||||
ti_show.firstaired = clean_data(c.show.premiered)
|
||||
ti_show.ids = TVInfoIDs(ids={TVINFO_TVMAZE: ti_show.id})
|
||||
ti_show.overview = clean_data(c.show.summary)
|
||||
ti_show.status = clean_data(c.show.status)
|
||||
net = c.show.network or c.show.web_channel
|
||||
if net:
|
||||
show.network = clean_data(net.name)
|
||||
show.network_id = net.maze_id
|
||||
show.network_country = clean_data(net.country)
|
||||
show.network_timezone = clean_data(net.timezone)
|
||||
show.network_country_code = clean_data(net.code)
|
||||
show.network_is_stream = None is not c.show.web_channel
|
||||
ch.append(TVInfoCharacter(name=clean_data(c.character.name), show=show))
|
||||
ti_show.network = clean_data(net.name)
|
||||
ti_show.network_id = net.maze_id
|
||||
ti_show.network_country = clean_data(net.country)
|
||||
ti_show.network_country_code = clean_data(net.code)
|
||||
ti_show.network_timezone = clean_data(net.timezone)
|
||||
ti_show.network_is_stream = None is not c.show.web_channel
|
||||
ch.append(TVInfoCharacter(name=clean_data(c.character.name), ti_show=ti_show, episode_count=1))
|
||||
try:
|
||||
birthdate = person_obj.birthday and tz_p.parse(person_obj.birthday).date()
|
||||
birthdate = tvmaze_person_obj.birthday and tz_p.parse(tvmaze_person_obj.birthday).date()
|
||||
except (BaseException, Exception):
|
||||
birthdate = None
|
||||
try:
|
||||
deathdate = person_obj.death_day and tz_p.parse(person_obj.death_day).date()
|
||||
deathdate = tvmaze_person_obj.death_day and tz_p.parse(tvmaze_person_obj.death_day).date()
|
||||
except (BaseException, Exception):
|
||||
deathdate = None
|
||||
return TVInfoPerson(p_id=person_obj.id, name=clean_data(person_obj.name),
|
||||
image=person_obj.image and person_obj.image.get('original'),
|
||||
gender=PersonGenders.named.get(person_obj.gender and person_obj.gender.lower(),
|
||||
|
||||
_ti_person_obj = TVInfoPerson(
|
||||
p_id=tvmaze_person_obj.id, name=clean_data(tvmaze_person_obj.name),
|
||||
image=tvmaze_person_obj.image and tvmaze_person_obj.image.get('original'),
|
||||
gender=PersonGenders.named.get(tvmaze_person_obj.gender and tvmaze_person_obj.gender.lower(),
|
||||
PersonGenders.unknown),
|
||||
birthdate=birthdate, deathdate=deathdate,
|
||||
country=person_obj.country and clean_data(person_obj.country.get('name')),
|
||||
country_code=person_obj.country and clean_data(person_obj.country.get('code')),
|
||||
country_timezone=person_obj.country and clean_data(person_obj.country.get('timezone')),
|
||||
thumb_url=person_obj.image and person_obj.image.get('medium'),
|
||||
url=person_obj.url, ids={TVINFO_TVMAZE: person_obj.id}, characters=ch
|
||||
country=tvmaze_person_obj.country and clean_data(tvmaze_person_obj.country.get('name')),
|
||||
country_code=tvmaze_person_obj.country and clean_data(tvmaze_person_obj.country.get('code')),
|
||||
country_timezone=tvmaze_person_obj.country and clean_data(tvmaze_person_obj.country.get('timezone')),
|
||||
thumb_url=tvmaze_person_obj.image and tvmaze_person_obj.image.get('medium'),
|
||||
url=tvmaze_person_obj.url, ids=TVInfoIDs(ids={TVINFO_TVMAZE: tvmaze_person_obj.id})
|
||||
)
|
||||
|
||||
for (c_t, regular) in [(tvmaze_person_obj.castcredits or [], True),
|
||||
(tvmaze_person_obj.guestcastcredits or [], False)]:
|
||||
for c in c_t: # type: tvmaze.CastCredit
|
||||
_show = c.show or c.episode.show
|
||||
_clean_char_name = clean_data(c.character.name)
|
||||
ti_show = TVInfoShow()
|
||||
if None is not _show:
|
||||
_clean_show_name = clean_data(_show.name)
|
||||
_clean_show_id = clean_data(_show.id)
|
||||
_cur_dup = (_clean_char_name, _clean_show_id)
|
||||
if _cur_dup in _dupes:
|
||||
_co = next((_c for _c in ch if _clean_show_id == _c.ti_show.id
|
||||
and _c.name == _clean_char_name), None)
|
||||
if None is not _co:
|
||||
ti_show = _co.ti_show
|
||||
_co.episode_count += 1
|
||||
if not regular:
|
||||
ep_no = c.episode.episode_number or 0
|
||||
_co.guest_episodes_numbers.setdefault(c.episode.season_number, []).append(ep_no)
|
||||
if c.episode.season_number not in ti_show:
|
||||
season = TVInfoSeason(show=ti_show, number=c.episode.season_number)
|
||||
ti_show[c.episode.season_number] = season
|
||||
else:
|
||||
season = ti_show[c.episode.season_number]
|
||||
episode = self._make_episode(c.episode, show_obj=ti_show)
|
||||
episode.season = season
|
||||
ti_show[c.episode.season_number][ep_no] = episode
|
||||
continue
|
||||
else:
|
||||
_dupes.append(_cur_dup)
|
||||
ti_show.seriesname = clean_data(_show.name)
|
||||
ti_show.id = _show.id
|
||||
ti_show.firstaired = clean_data(_show.premiered)
|
||||
ti_show.ids = TVInfoIDs(ids={TVINFO_TVMAZE: ti_show.id})
|
||||
ti_show.overview = enforce_type(clean_data(_show.summary), str, '')
|
||||
ti_show.status = clean_data(_show.status)
|
||||
net = _show.network or _show.web_channel
|
||||
if net:
|
||||
ti_show.network = clean_data(net.name)
|
||||
ti_show.network_id = net.maze_id
|
||||
ti_show.network_country = clean_data(net.country)
|
||||
ti_show.network_timezone = clean_data(net.timezone)
|
||||
ti_show.network_country_code = clean_data(net.code)
|
||||
ti_show.network_is_stream = None is not _show.web_channel
|
||||
if c.episode:
|
||||
|
||||
ti_show.show_loaded = False
|
||||
ti_show.load_method = self._show_info_loader
|
||||
season = TVInfoSeason(show=ti_show, number=c.episode.season_number)
|
||||
ti_show[c.episode.season_number] = season
|
||||
episode = self._make_episode(c.episode, show_obj=ti_show)
|
||||
episode.season = season
|
||||
ti_show[c.episode.season_number][c.episode.episode_number or 0] = episode
|
||||
if not regular:
|
||||
_g_kw = {'guest_episodes_numbers': {c.episode.season_number: [c.episode.episode_number or 0]}}
|
||||
else:
|
||||
_g_kw = {}
|
||||
ch.append(TVInfoCharacter(name=_clean_char_name, ti_show=ti_show, regular=regular, episode_count=1,
|
||||
person=[_ti_person_obj], **_g_kw))
|
||||
_ti_person_obj.characters = ch
|
||||
return _ti_person_obj
|
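# Credit-folding sketch: the _dupes bookkeeping above means a person who
# guest-stars three times in one show yields a single TVInfoCharacter with
# episode_count == 3 and the episode numbers grouped per season, e.g.
#
#   person.characters[0].guest_episodes_numbers  # e.g. {1: [3, 7], 2: [2]}
#
# rather than three separate character entries.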
||||
|
||||
def _show_info_loader(self, show_id, show_data=None, show_obj=None, load_images=True, load_actors=True):
|
||||
# type: (int, TVMazeShow, TVInfoShow, bool, bool) -> TVInfoShow
|
||||
try:
|
||||
_s_d = show_data or tvmaze.show_main_info(show_id, embed='cast')
|
||||
if _s_d:
|
||||
if None is not show_obj:
|
||||
_s_o = show_obj
|
||||
else:
|
||||
_s_o = TVInfoShow()
|
||||
show_dict = _s_o.__dict__
|
||||
for k, v in iteritems(show_dict):
|
||||
if k not in ('cast', 'crew', 'images', 'aliases', 'rating'):
|
||||
show_dict[k] = getattr(_s_d, show_map.get(k, k), clean_data(show_dict[k]))
|
||||
_s_o.aliases = [clean_data(a.name) for a in _s_d.akas]
|
||||
_s_o.runtime = _s_d.average_runtime or _s_d.runtime
|
||||
p_set = False
|
||||
if _s_d.image:
|
||||
p_set = True
|
||||
_s_o.poster = _s_d.image.get('original')
|
||||
_s_o.poster_thumb = _s_d.image.get('medium')
|
||||
|
||||
if load_images and \
|
||||
not all(getattr(_s_o, '%s_loaded' % t, False) for t in ('poster', 'banner', 'fanart')):
|
||||
if _s_d.images:
|
||||
_s_o.poster_loaded = True
|
||||
_s_o.banner_loaded = True
|
||||
_s_o.fanart_loaded = True
|
||||
self._set_images(_s_o, _s_d, p_set)
|
||||
|
||||
if _s_d.schedule:
|
||||
if 'time' in _s_d.schedule:
|
||||
_s_o.airs_time = _s_d.schedule['time']
|
||||
try:
|
||||
h, m = _s_d.schedule['time'].split(':')
|
||||
h, m = try_int(h, None), try_int(m, None)
|
||||
if None is not h and None is not m:
|
||||
_s_o.time = datetime.time(hour=h, minute=m)
|
||||
except (BaseException, Exception):
|
||||
pass
|
||||
if 'days' in _s_d.schedule:
|
||||
_s_o.airs_dayofweek = ', '.join(_s_d.schedule['days'])
|
||||
|
||||
if load_actors and not _s_o.actors_loaded:
|
||||
if _s_d.cast:
|
||||
character_person_ids = {}
|
||||
for cur_ch in _s_o.cast[RoleTypes.ActorMain]:
|
||||
character_person_ids.setdefault(cur_ch.id, []).extend([p.id for p in cur_ch.person])
|
||||
for cur_ch in _s_d.cast.characters:
|
||||
existing_character = next(
|
||||
(c for c in _s_o.cast[RoleTypes.ActorMain] if c.id == cur_ch.id),
|
||||
None) # type: Optional[TVInfoCharacter]
|
||||
person = self._convert_person(cur_ch.person)
|
||||
if existing_character:
|
||||
existing_person = next((p for p in existing_character.person
|
||||
if person.id == p.ids.get(TVINFO_TVMAZE)),
|
||||
None) # type: TVInfoPerson
|
||||
if existing_person:
|
||||
try:
|
||||
character_person_ids[cur_ch.id].remove(existing_person.id)
|
||||
except (BaseException, Exception):
|
||||
pass
|
||||
(existing_person.p_id, existing_person.name, existing_person.image,
|
||||
existing_person.gender,
|
||||
existing_person.birthdate, existing_person.deathdate, existing_person.country,
|
||||
existing_person.country_code, existing_person.country_timezone,
|
||||
existing_person.thumb_url,
|
||||
existing_person.url, existing_person.ids) = \
|
||||
(cur_ch.person.id, clean_data(cur_ch.person.name),
|
||||
cur_ch.person.image and cur_ch.person.image.get('original'),
|
||||
PersonGenders.named.get(
|
||||
cur_ch.person.gender and cur_ch.person.gender.lower(),
|
||||
PersonGenders.unknown),
|
||||
person.birthdate, person.deathdate,
|
||||
cur_ch.person.country and clean_data(cur_ch.person.country.get('name')),
|
||||
cur_ch.person.country and clean_data(cur_ch.person.country.get('code')),
|
||||
cur_ch.person.country and clean_data(cur_ch.person.country.get('timezone')),
|
||||
cur_ch.person.image and cur_ch.person.image.get('medium'),
|
||||
cur_ch.person.url, {TVINFO_TVMAZE: cur_ch.person.id})
|
||||
else:
|
||||
existing_character.person.append(person)
|
||||
else:
|
||||
_s_o.cast[RoleTypes.ActorMain].append(
|
||||
TVInfoCharacter(image=cur_ch.image and cur_ch.image.get('original'),
|
||||
name=clean_data(cur_ch.name),
|
||||
ids=TVInfoIDs({TVINFO_TVMAZE: cur_ch.id}),
|
||||
p_id=cur_ch.id, person=[person], plays_self=cur_ch.plays_self,
|
||||
thumb_url=cur_ch.image and cur_ch.image.get('medium'),
|
||||
ti_show=_s_o
|
||||
))
|
||||
|
||||
if character_person_ids:
|
||||
for cur_ch, cur_p_ids in iteritems(character_person_ids):
|
||||
if cur_p_ids:
|
||||
char = next((mc for mc in _s_o.cast[RoleTypes.ActorMain] if mc.id == cur_ch),
|
||||
None) # type: Optional[TVInfoCharacter]
|
||||
if char:
|
||||
char.person = [p for p in char.person if p.id not in cur_p_ids]
|
||||
|
||||
if _s_d.cast:
|
||||
_s_o.actors = [
|
||||
{'character': {'id': ch.id,
|
||||
'name': clean_data(ch.name),
|
||||
'url': 'https://www.tvmaze.com/character/view?id=%s' % ch.id,
|
||||
'image': ch.image and ch.image.get('original'),
|
||||
},
|
||||
'person': {'id': ch.person and ch.person.id,
|
||||
'name': ch.person and clean_data(ch.person.name),
|
||||
'url': ch.person and 'https://www.tvmaze.com/person/view?id=%s' % ch.person.id,
|
||||
'image': ch.person and ch.person.image and ch.person.image.get('original'),
|
||||
'birthday': None, # not sure about format
|
||||
'deathday': None, # not sure about format
|
||||
'gender': ch.person and ch.person.gender,
|
||||
'country': ch.person and ch.person.country and
|
||||
clean_data(ch.person.country.get('name')),
|
||||
},
|
||||
} for ch in _s_d.cast.characters]
|
||||
|
||||
if _s_d.crew:
|
||||
for cur_cw in _s_d.crew:
|
||||
rt = crew_type_names.get(cur_cw.type.lower(), RoleTypes.CrewOther)
|
||||
_s_o.crew[rt].append(
|
||||
Crew(p_id=cur_cw.person.id, name=clean_data(cur_cw.person.name),
|
||||
image=cur_cw.person.image and cur_cw.person.image.get('original'),
|
||||
gender=cur_cw.person.gender,
|
||||
birthdate=cur_cw.person.birthday, deathdate=cur_cw.person.death_day,
|
||||
country=cur_cw.person.country and cur_cw.person.country.get('name'),
|
||||
country_code=cur_cw.person.country and clean_data(
|
||||
cur_cw.person.country.get('code')),
|
||||
country_timezone=cur_cw.person.country
|
||||
and clean_data(cur_cw.person.country.get('timezone')),
|
||||
crew_type_name=cur_cw.type,
|
||||
)
|
||||
)
|
||||
|
||||
if _s_d.externals:
|
||||
_s_o.ids = TVInfoIDs(tvdb=_s_d.externals.get('thetvdb'),
|
||||
rage=_s_d.externals.get('tvrage'),
|
||||
imdb=clean_data(_s_d.externals.get('imdb') and
|
||||
try_int(_s_d.externals.get('imdb').replace('tt', ''),
|
||||
None)))
|
||||
|
||||
if _s_d.network:
|
||||
self._set_network(_s_o, _s_d.network, False)
|
||||
elif _s_d.web_channel:
|
||||
self._set_network(_s_o, _s_d.web_channel, True)
|
||||
|
||||
return _s_o
|
||||
except (BaseException, Exception):
|
||||
pass
|
||||
|
||||
def _search_person(self, name=None, ids=None):
|
||||
# type: (AnyStr, Dict[integer_types, integer_types]) -> List[TVInfoPerson]
|
||||
urls, result, ids = [], [], ids or {}
|
||||
|
@ -597,27 +700,31 @@ class TvMaze(TVInfoBase):
|
|||
return self._convert_person(p)
|
||||
|
||||
def get_premieres(self, **kwargs):
|
||||
- # type: (...) -> List[TVInfoEpisode]
|
||||
- return self._filtered_schedule(**kwargs).get('premieres')
|
||||
+ # type: (...) -> List[TVInfoShow]
|
||||
+ return [_e.show for _e in self._filtered_schedule(**kwargs).get('premieres')]
|
||||
|
||||
def get_returning(self, **kwargs):
|
||||
- # type: (...) -> List[TVInfoEpisode]
|
||||
- return self._filtered_schedule(**kwargs).get('returning')
|
||||
+ # type: (...) -> List[TVInfoShow]
|
||||
+ return [_e.show for _e in self._filtered_schedule(**kwargs).get('returning')]
|
||||
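# NOTE (reviewer sketch, not part of the change itself): with the edits
# above, both schedule helpers now return TVInfoShow objects taken from each
# episode's .show attribute instead of the TVInfoEpisode list returned
# before, roughly:
#
#   for ti_show in TvMaze().get_premieres():
#       print(ti_show.seriesname)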
|
||||
- def _make_episode(self, episode_data, show_data=None, get_images=False, get_akas=False):
|
||||
- # type: (TVMazeEpisode, TVMazeShow, bool, bool) -> TVInfoEpisode
|
||||
+ def _make_episode(self, episode_data, show_data=None, get_images=False, get_akas=False, show_obj=None):
|
||||
+ # type: (TVMazeEpisode, TVMazeShow, bool, bool, TVInfoShow) -> TVInfoEpisode
|
||||
"""
|
||||
make out of TVMazeEpisode object and optionally TVMazeShow a TVInfoEpisode
|
||||
"""
|
||||
if None is not show_obj:
|
||||
ti_show = show_obj
|
||||
else:
|
||||
ti_show = TVInfoShow()
|
||||
ti_show.seriesname = clean_data(show_data.name)
|
||||
ti_show.id = show_data.maze_id
|
||||
ti_show.seriesid = ti_show.id
|
||||
ti_show.language = clean_data(show_data.language)
|
||||
- ti_show.overview = clean_data(show_data.summary)
|
||||
+ ti_show.overview = enforce_type(clean_data(show_data.summary), str, '')
|
||||
ti_show.firstaired = clean_data(show_data.premiered)
|
||||
ti_show.runtime = show_data.average_runtime or show_data.runtime
|
||||
ti_show.vote_average = show_data.rating and show_data.rating.get('average')
|
||||
ti_show.rating = ti_show.vote_average
|
||||
ti_show.popularity = show_data.weight
|
||||
ti_show.genre_list = clean_data(show_data.genres or [])
|
||||
ti_show.genre = '|'.join(ti_show.genre_list).lower()
|
||||
|
@ -629,7 +736,7 @@ class TvMaze(TVInfoBase):
|
|||
ti_show.poster = show_data.image and show_data.image.get('original')
|
||||
if get_akas:
|
||||
ti_show.aliases = [clean_data(a.name) for a in show_data.akas]
|
||||
- if 'days' in show_data.schedule:
|
||||
+ if show_data.schedule and 'days' in show_data.schedule:
|
||||
ti_show.airs_dayofweek = ', '.join(clean_data(show_data.schedule['days']))
|
||||
network = show_data.network or show_data.web_channel
|
||||
if network:
|
||||
|
@ -652,9 +759,12 @@ class TvMaze(TVInfoBase):
|
|||
ti_episode = TVInfoEpisode(show=ti_show)
|
||||
ti_episode.id = episode_data.maze_id
|
||||
ti_episode.seasonnumber = episode_data.season_number
|
||||
- ti_episode.episodenumber = episode_data.episode_number
|
||||
+ ti_episode.episodenumber = episode_data.episode_number or 0
|
||||
ti_episode.episodename = clean_data(episode_data.title)
|
||||
- ti_episode.airtime = clean_data(episode_data.airtime)
|
||||
+ try:
|
||||
+ ti_episode.airtime = datetime.time.fromisoformat(clean_data(episode_data.airtime))
|
||||
+ except (BaseException, Exception):
|
||||
+ ti_episode.airtime = None
|
||||
ti_episode.firstaired = clean_data(episode_data.airdate)
|
||||
if episode_data.airstamp:
|
||||
try:
|
||||
|
@ -665,8 +775,13 @@ class TvMaze(TVInfoBase):
|
|||
ti_episode.filename = episode_data.image and (episode_data.image.get('original') or
|
||||
episode_data.image.get('medium'))
|
||||
ti_episode.is_special = episode_data.is_special()
|
||||
- ti_episode.overview = clean_data(episode_data.summary)
|
||||
+ ti_episode.overview = enforce_type(clean_data(episode_data.summary), str, '')
|
||||
ti_episode.runtime = episode_data.runtime
|
||||
if ti_episode.seasonnumber not in ti_show:
|
||||
season = TVInfoSeason(show=ti_show, number=ti_episode.seasonnumber)
|
||||
ti_show[ti_episode.seasonnumber] = season
|
||||
ti_episode.season = season
|
||||
ti_show[ti_episode.seasonnumber][ti_episode.episodenumber] = ti_episode
|
||||
return ti_episode
|
||||
|
||||
def _filtered_schedule(self, **kwargs):
|
||||
|
|
lib/langcodes/__init__.py (new file, 1931 lines)
lib/langcodes/build_data.py (new file, 242 lines)
|
@ -0,0 +1,242 @@
|
|||
import json
|
||||
import xml.etree.ElementTree as ET
|
||||
from langcodes.util import data_filename
|
||||
from langcodes.registry_parser import parse_registry
|
||||
|
||||
|
||||
def read_cldr_supplemental(dataname):
|
||||
cldr_supp_path = data_filename('cldr-json/cldr-json/cldr-core/supplemental')
|
||||
filename = data_filename(f'{cldr_supp_path}/{dataname}.json')
|
||||
fulldata = json.load(open(filename, encoding='utf-8'))
|
||||
if dataname == 'aliases':
|
||||
data = fulldata['supplemental']['metadata']['alias']
|
||||
else:
|
||||
data = fulldata['supplemental'][dataname]
|
||||
return data
|
||||
|
||||
|
||||
def read_iana_registry_suppress_scripts():
|
||||
scripts = {}
|
||||
for entry in parse_registry():
|
||||
if entry['Type'] == 'language' and 'Suppress-Script' in entry:
|
||||
scripts[entry['Subtag']] = entry['Suppress-Script']
|
||||
return scripts
|
||||
|
||||
|
||||
def read_iana_registry_scripts():
|
||||
scripts = set()
|
||||
for entry in parse_registry():
|
||||
if entry['Type'] == 'script':
|
||||
scripts.add(entry['Subtag'])
|
||||
return scripts
|
||||
|
||||
|
||||
def read_iana_registry_macrolanguages():
|
||||
macros = {}
|
||||
for entry in parse_registry():
|
||||
if entry['Type'] == 'language' and 'Macrolanguage' in entry:
|
||||
macros[entry['Subtag']] = entry['Macrolanguage']
|
||||
return macros
|
||||
|
||||
|
||||
def read_iana_registry_replacements():
|
||||
replacements = {}
|
||||
for entry in parse_registry():
|
||||
if entry['Type'] == 'language' and 'Preferred-Value' in entry:
|
||||
# Replacements for language codes
|
||||
replacements[entry['Subtag']] = entry['Preferred-Value']
|
||||
elif 'Tag' in entry and 'Preferred-Value' in entry:
|
||||
# Replacements for entire tags
|
||||
replacements[entry['Tag'].lower()] = entry['Preferred-Value']
|
||||
return replacements
|
||||
|
||||
|
||||
def write_python_dict(outfile, name, d):
|
||||
print(f"{name} = {{", file=outfile)
|
||||
for key in sorted(d):
|
||||
value = d[key]
|
||||
print(f" {key!r}: {value!r},", file=outfile)
|
||||
print("}", file=outfile)
|
||||
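# NOTE (reviewer sketch, not part of the upstream file): for example,
# write_python_dict(outfile, 'DEFAULT_SCRIPTS', {'en': 'Latn'}) emits
#
#   DEFAULT_SCRIPTS = {
#       'en': 'Latn',
#   }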
|
||||
|
||||
def write_python_set(outfile, name, s):
|
||||
print(f"{name} = {{", file=outfile)
|
||||
for key in sorted(set(s)):
|
||||
print(f" {key!r},", file=outfile)
|
||||
print("}", file=outfile)
|
||||
|
||||
|
||||
GENERATED_HEADER = "# This file is generated by build_data.py."
|
||||
|
||||
|
||||
def read_validity_regex():
|
||||
validity_options = []
|
||||
for codetype in ('language', 'region', 'script', 'variant'):
|
||||
validity_path = data_filename(f'cldr/common/validity/{codetype}.xml')
|
||||
root = ET.fromstring(open(validity_path).read())
|
||||
matches = root.findall('./idValidity/id')
|
||||
for match in matches:
|
||||
for item in match.text.strip().split():
|
||||
if '~' in item:
|
||||
assert item[-2] == '~'
|
||||
prefix = item[:-3]
|
||||
range_start = item[-3]
|
||||
range_end = item[-1]
|
||||
option = f"{prefix}[{range_start}-{range_end}]"
|
||||
validity_options.append(option)
|
||||
else:
|
||||
validity_options.append(item)
|
||||
options = '|'.join(validity_options)
|
||||
return f'^({options})$'
|
||||
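# NOTE (reviewer sketch, not part of the upstream file): a range item such as
# 'qaa~z' in the validity data expands to the pattern 'qa[a-z]', so the
# resulting VALIDITY regex accepts that whole private-use range in one branch.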
|
||||
|
||||
def read_language_distances():
|
||||
language_info_path = data_filename('cldr/common/supplemental/languageInfo.xml')
|
||||
root = ET.fromstring(open(language_info_path).read())
|
||||
matches = root.findall(
|
||||
'./languageMatching/languageMatches[@type="written_new"]/languageMatch'
|
||||
)
|
||||
tag_distances = {}
|
||||
for match in matches:
|
||||
attribs = match.attrib
|
||||
n_parts = attribs['desired'].count('_') + 1
|
||||
if n_parts < 3:
|
||||
if attribs.get('oneway') == 'true':
|
||||
pairs = [(attribs['desired'], attribs['supported'])]
|
||||
else:
|
||||
pairs = [
|
||||
(attribs['desired'], attribs['supported']),
|
||||
(attribs['supported'], attribs['desired']),
|
||||
]
|
||||
for (desired, supported) in pairs:
|
||||
desired_distance = tag_distances.setdefault(desired, {})
|
||||
desired_distance[supported] = int(attribs['distance'])
|
||||
|
||||
# The 'languageInfo' data file contains distances for the unnormalized
|
||||
# tag 'sh', but we work mostly with normalized tags, and they don't
|
||||
# describe at all how to cope with this.
|
||||
#
|
||||
# 'sh' normalizes to 'sr-Latn', and when we're matching languages we
|
||||
# aren't matching scripts yet, so when 'sh' appears we'll add a
|
||||
# corresponding match for 'sr'.
|
||||
#
|
||||
# Then because we're kind of making this plan up, add 1 to the distance
|
||||
# so it's a worse match than ones that are actually clearly defined
|
||||
# in languageInfo.
|
||||
if desired == 'sh' or supported == 'sh':
|
||||
if desired == 'sh':
|
||||
desired = 'sr'
|
||||
if supported == 'sh':
|
||||
supported = 'sr'
|
||||
if desired != supported:
|
||||
# don't try to define a non-zero distance for sr <=> sr
|
||||
desired_distance = tag_distances.setdefault(desired, {})
|
||||
desired_distance[supported] = int(attribs['distance']) + 1
|
||||
|
||||
return tag_distances
|
||||
|
||||
|
||||
def build_data():
|
||||
lang_scripts = read_iana_registry_suppress_scripts()
|
||||
all_scripts = read_iana_registry_scripts()
|
||||
macrolanguages = read_iana_registry_macrolanguages()
|
||||
iana_replacements = read_iana_registry_replacements()
|
||||
language_distances = read_language_distances()
|
||||
|
||||
alias_data = read_cldr_supplemental('aliases')
|
||||
likely_subtags = read_cldr_supplemental('likelySubtags')
|
||||
replacements = {}
|
||||
|
||||
# Aliased codes can still have alpha3 codes, and there's no unified source
|
||||
# about what they are. It depends on whether the alias predates or postdates
|
||||
# ISO 639-2, which nobody should have to care about. So let's set all the
|
||||
# alpha3 codes for aliased alpha2 codes here.
|
||||
alpha3_mapping = {
|
||||
'tl': 'tgl', # even though it normalizes to 'fil'
|
||||
'in': 'ind',
|
||||
'iw': 'heb',
|
||||
'ji': 'yid',
|
||||
'jw': 'jav',
|
||||
'sh': 'hbs',
|
||||
}
|
||||
alpha3_biblio = {}
|
||||
norm_macrolanguages = {}
|
||||
for alias_type in ['languageAlias', 'scriptAlias', 'territoryAlias']:
|
||||
aliases = alias_data[alias_type]
|
||||
# Initially populate 'languageAlias' with the aliases from the IANA file
|
||||
if alias_type == 'languageAlias':
|
||||
replacements[alias_type] = iana_replacements
|
||||
replacements[alias_type]['root'] = 'und'
|
||||
else:
|
||||
replacements[alias_type] = {}
|
||||
for code, value in aliases.items():
|
||||
# Make all keys lowercase so they can be looked up
|
||||
# case-insensitively
|
||||
code = code.lower()
|
||||
|
||||
# If there are multiple replacements, take the first one. For example,
|
||||
# we just replace the Soviet Union (SU) with Russia (RU), instead of
|
||||
# trying to do something context-sensitive and poorly standardized
|
||||
# that selects one of the successor countries to the Soviet Union.
|
||||
replacement = value['_replacement'].split()[0]
|
||||
if value['_reason'] == 'macrolanguage':
|
||||
norm_macrolanguages[code] = replacement
|
||||
else:
|
||||
# CLDR tries to oversimplify some codes as it assigns aliases.
|
||||
# For example, 'nor' is the ISO alpha3 code for 'no', but CLDR
|
||||
# would prefer you use 'nb' over 'no', so it makes 'nor' an
|
||||
# alias of 'nb'. But 'nb' already has an alpha3 code, 'nob'.
|
||||
#
|
||||
# We undo this oversimplification so that we can get a
|
||||
# canonical mapping between alpha2 and alpha3 codes.
|
||||
if code == 'nor':
|
||||
replacement = 'no'
|
||||
elif code == 'mol':
|
||||
replacement = 'mo'
|
||||
elif code == 'twi':
|
||||
replacement = 'tw'
|
||||
elif code == 'bih':
|
||||
replacement = 'bh'
|
||||
|
||||
replacements[alias_type][code] = replacement
|
||||
if alias_type == 'languageAlias':
|
||||
if value['_reason'] == 'overlong':
|
||||
if replacement in alpha3_mapping:
|
||||
raise ValueError(
|
||||
"{code!r} is an alpha3 for {replacement!r}, which"
|
||||
" already has an alpha3: {orig!r}".format(
|
||||
code=code,
|
||||
replacement=replacement,
|
||||
orig=alpha3_mapping[replacement],
|
||||
)
|
||||
)
|
||||
alpha3_mapping[replacement] = code
|
||||
elif value['_reason'] == 'bibliographic':
|
||||
alpha3_biblio[replacement] = code
|
||||
|
||||
validity_regex = read_validity_regex()
|
||||
|
||||
# Write the contents of data_dicts.py.
|
||||
with open('data_dicts.py', 'w', encoding='utf-8') as outfile:
|
||||
print(GENERATED_HEADER, file=outfile)
|
||||
print("import re\n", file=outfile)
|
||||
write_python_dict(outfile, 'DEFAULT_SCRIPTS', lang_scripts)
|
||||
write_python_dict(
|
||||
outfile, 'LANGUAGE_REPLACEMENTS', replacements['languageAlias']
|
||||
)
|
||||
write_python_dict(outfile, 'LANGUAGE_ALPHA3', alpha3_mapping)
|
||||
write_python_dict(outfile, 'LANGUAGE_ALPHA3_BIBLIOGRAPHIC', alpha3_biblio)
|
||||
write_python_dict(outfile, 'SCRIPT_REPLACEMENTS', replacements['scriptAlias'])
|
||||
write_python_set(outfile, 'ALL_SCRIPTS', all_scripts)
|
||||
write_python_dict(
|
||||
outfile, 'TERRITORY_REPLACEMENTS', replacements['territoryAlias']
|
||||
)
|
||||
write_python_dict(outfile, 'MACROLANGUAGES', macrolanguages)
|
||||
write_python_dict(outfile, 'NORMALIZED_MACROLANGUAGES', norm_macrolanguages)
|
||||
write_python_dict(outfile, 'LIKELY_SUBTAGS', likely_subtags)
|
||||
write_python_dict(outfile, 'LANGUAGE_DISTANCES', language_distances)
|
||||
print(f"VALIDITY = re.compile({validity_regex!r})", file=outfile)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
build_data()
|
lib/langcodes/data/language-subtag-registry.txt (new file, 48462 lines)
lib/langcodes/data_dicts.py (new file, 4377 lines)
lib/langcodes/language_distance.py (new file, 188 lines)
|
@ -0,0 +1,188 @@
|
|||
from .data_dicts import LANGUAGE_DISTANCES
|
||||
from typing import Dict, Tuple
|
||||
|
||||
|
||||
TagTriple = Tuple[str, str, str]
|
||||
_DISTANCE_CACHE: Dict[Tuple[TagTriple, TagTriple], int] = {}
|
||||
DEFAULT_LANGUAGE_DISTANCE = LANGUAGE_DISTANCES["*"]["*"]
|
||||
DEFAULT_SCRIPT_DISTANCE = LANGUAGE_DISTANCES["*_*"]["*_*"]
|
||||
DEFAULT_TERRITORY_DISTANCE = 4
|
||||
|
||||
|
||||
# Territory clusters used in territory matching:
|
||||
# Maghreb (the western Arab world)
|
||||
MAGHREB = {"MA", "DZ", "TN", "LY", "MR", "EH"}
|
||||
|
||||
# United States and its territories
|
||||
US = {"AS", "GU", "MH", "MP", "PR", "UM", "US", "VI"}
|
||||
|
||||
# Special Autonomous Regions of China
|
||||
CNSAR = {"HK", "MO"}
|
||||
|
||||
LATIN_AMERICA = {
|
||||
"419",
|
||||
# Central America
|
||||
"013",
|
||||
"BZ",
|
||||
"CR",
|
||||
"SV",
|
||||
"GT",
|
||||
"HN",
|
||||
"MX",
|
||||
"NI",
|
||||
"PA",
|
||||
# South America
|
||||
"005",
|
||||
"AR",
|
||||
"BO",
|
||||
"BR",
|
||||
"CL",
|
||||
"CO",
|
||||
"EC",
|
||||
"FK",
|
||||
"GF",
|
||||
"GY",
|
||||
"PY",
|
||||
"PE",
|
||||
"SR",
|
||||
"UY",
|
||||
"VE",
|
||||
}
|
||||
|
||||
# North and South America
|
||||
AMERICAS = {
|
||||
"019",
|
||||
# Caribbean
|
||||
"029",
|
||||
"AI",
|
||||
"AG",
|
||||
"AW",
|
||||
"BS",
|
||||
"BB",
|
||||
"VG",
|
||||
"BQ",
|
||||
"KY",
|
||||
"CU",
|
||||
"CW",
|
||||
"DM",
|
||||
"DO",
|
||||
"GD",
|
||||
"GP",
|
||||
"HT",
|
||||
"JM",
|
||||
"MQ",
|
||||
"MS",
|
||||
"PR",
|
||||
"SX",
|
||||
"BL",
|
||||
"KN",
|
||||
"LC",
|
||||
"MF",
|
||||
"VC",
|
||||
"TT",
|
||||
"TC",
|
||||
"VI",
|
||||
# Northern America
|
||||
"021",
|
||||
"BM",
|
||||
"CA",
|
||||
"GL",
|
||||
"PM",
|
||||
"US",
|
||||
# North America as a whole
|
||||
"003",
|
||||
} | LATIN_AMERICA
|
||||
|
||||
|
||||
def tuple_distance_cached(desired: TagTriple, supported: TagTriple) -> int:
|
||||
"""
|
||||
Takes in triples of (language, script, territory), which can be derived by
|
||||
'maximizing' a language tag. Returns a number from 0 to 135 indicating the
|
||||
'distance' between these for the purposes of language matching.
|
||||
"""
|
||||
# First of all, if these are identical, return quickly:
|
||||
if supported == desired:
|
||||
return 0
|
||||
|
||||
# If we've already figured it out, return the cached distance.
|
||||
if (desired, supported) in _DISTANCE_CACHE:
|
||||
return _DISTANCE_CACHE[desired, supported]
|
||||
else:
|
||||
result = _tuple_distance(desired, supported)
|
||||
_DISTANCE_CACHE[desired, supported] = result
|
||||
return result
|
||||
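# NOTE (reviewer sketch, not part of the upstream file): exact results depend
# on the generated LANGUAGE_DISTANCES table, but for instance
#
#   tuple_distance_cached(('es', 'Latn', 'PY'), ('es', 'Latn', '419'))
#
# is 1 via the Latin-America territory rule below, while unrelated triples
# accumulate the default language, script, and territory distances.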
|
||||
|
||||
def _get2(dictionary: dict, key1: str, key2: str, default):
|
||||
return dictionary.get(key1, {}).get(key2, default)
|
||||
|
||||
|
||||
def _tuple_distance(desired: TagTriple, supported: TagTriple) -> int:
|
||||
desired_language, desired_script, desired_territory = desired
|
||||
supported_language, supported_script, supported_territory = supported
|
||||
distance = 0
|
||||
|
||||
if desired_language != supported_language:
|
||||
distance += _get2(
|
||||
LANGUAGE_DISTANCES,
|
||||
desired_language,
|
||||
supported_language,
|
||||
DEFAULT_LANGUAGE_DISTANCE,
|
||||
)
|
||||
|
||||
desired_script_pair = f"{desired_language}_{desired_script}"
|
||||
supported_script_pair = f"{supported_language}_{supported_script}"
|
||||
|
||||
if desired_script != supported_script:
|
||||
# Scripts can match other scripts, but only when paired with a
|
||||
# language. For example, there is no reason to assume someone who can
|
||||
# read 'Latn' can read 'Cyrl', but there is plenty of reason to believe
|
||||
# someone who can read 'sr-Latn' can read 'sr-Cyrl' because Serbian is
|
||||
# a language written in two scripts.
|
||||
distance += _get2(
|
||||
LANGUAGE_DISTANCES,
|
||||
desired_script_pair,
|
||||
supported_script_pair,
|
||||
DEFAULT_SCRIPT_DISTANCE,
|
||||
)
|
||||
|
||||
if desired_territory != supported_territory:
|
||||
# The rules for matching territories are too weird to implement the
|
||||
# general case efficiently. Instead of implementing all the possible
|
||||
# match rules the XML could define, instead we just reimplement the
|
||||
# rules of CLDR 36.1 here in code.
|
||||
|
||||
tdist = DEFAULT_TERRITORY_DISTANCE
|
||||
if desired_script_pair == supported_script_pair:
|
||||
if desired_language == "ar":
|
||||
if (desired_territory in MAGHREB) != (supported_territory in MAGHREB):
|
||||
tdist = 5
|
||||
elif desired_language == "en":
|
||||
if (desired_territory == "GB") and (supported_territory not in US):
|
||||
tdist = 3
|
||||
elif (desired_territory not in US) and (supported_territory == "GB"):
|
||||
tdist = 3
|
||||
elif (desired_territory in US) != (supported_territory in US):
|
||||
tdist = 5
|
||||
# This is not a rule that's spelled out in CLDR, but is implied by things
|
||||
# about territory containment mentioned in other standards. Numeric values
|
||||
# for territories, like '003', represent broad regions that contain more
|
||||
# specific territories.
|
||||
#
|
||||
# 419 is the numeric value most often seen in language codes, particularly
|
||||
# 'es-419' for Latin American Spanish. If you have a language code that
|
||||
# differs only in that its territory is more specific, like 'es-PY', it should
|
||||
# be closer to a supported 'es-419' than anything with a territory difference.
|
||||
#
|
||||
# We can implement this for 419 without becoming responsible for keeping up
|
||||
# with which countries/territories/regions contain others in the general case.
|
||||
elif desired_territory in LATIN_AMERICA and supported_territory == "419":
|
||||
tdist = 1
|
||||
elif desired_language == "es" or desired_language == "pt":
|
||||
if (desired_territory in AMERICAS) != (supported_territory in AMERICAS):
|
||||
tdist = 5
|
||||
elif desired_script_pair == "zh_Hant":
|
||||
if (desired_territory in CNSAR) != (supported_territory in CNSAR):
|
||||
tdist = 5
|
||||
distance += tdist
|
||||
return distance
|
lib/langcodes/language_lists.py (new file, 517 lines)
|
@ -0,0 +1,517 @@
|
|||
# This is the list of language codes with the 'modern' level of support in CLDR
|
||||
# (compared to 'full', which contains many more languages). We use this as the
|
||||
# list of languages that we store specific name-to-code mappings for.
|
||||
|
||||
CLDR_LANGUAGES = {
|
||||
'af',
|
||||
'am',
|
||||
'ar',
|
||||
'az',
|
||||
'be',
|
||||
'bg',
|
||||
'bn',
|
||||
'bs',
|
||||
'ca',
|
||||
'cs',
|
||||
'cy',
|
||||
'da',
|
||||
'de',
|
||||
'el',
|
||||
'en',
|
||||
'es',
|
||||
'et',
|
||||
'eu',
|
||||
'fa',
|
||||
'fi',
|
||||
'fil',
|
||||
'fo',
|
||||
'fr',
|
||||
'ga',
|
||||
'gl',
|
||||
'gu',
|
||||
'he',
|
||||
'hi',
|
||||
'hr',
|
||||
'hu',
|
||||
'hy',
|
||||
'id',
|
||||
'is',
|
||||
'it',
|
||||
'ja',
|
||||
'ka',
|
||||
'kk',
|
||||
'km',
|
||||
'kn',
|
||||
'ko',
|
||||
'ky',
|
||||
'lo',
|
||||
'lt',
|
||||
'lv',
|
||||
'mk',
|
||||
'ml',
|
||||
'mn',
|
||||
'mr',
|
||||
'ms',
|
||||
'my',
|
||||
'nb',
|
||||
'ne',
|
||||
'nl',
|
||||
'pa',
|
||||
'pl',
|
||||
'pt',
|
||||
'ro',
|
||||
'ru',
|
||||
'si',
|
||||
'sk',
|
||||
'sl',
|
||||
'sq',
|
||||
'sr',
|
||||
'sv',
|
||||
'sw',
|
||||
'ta',
|
||||
'te',
|
||||
'th',
|
||||
'ti',
|
||||
'to',
|
||||
'tr',
|
||||
'uk',
|
||||
'und',
|
||||
'ur',
|
||||
'uz',
|
||||
'vi',
|
||||
'yue',
|
||||
'zh',
|
||||
'zu',
|
||||
}
|
||||
|
||||
|
||||
# These are the names of languages that have the most entries on the English and
|
||||
# German Wiktionaries. Wiktionary only consistently identifies languages by their
|
||||
# name, making it important to be able to recognize the names.
|
||||
#
|
||||
# These lists of names are used in `tests/test_wikt_languages.py`.
|
||||
WIKT_LANGUAGE_NAMES = {}
|
||||
|
||||
WIKT_LANGUAGE_NAMES['en'] = [
|
||||
"Spanish",
|
||||
"French",
|
||||
"Latvian",
|
||||
"Latin",
|
||||
"English",
|
||||
"Mandarin",
|
||||
"Italian",
|
||||
"Portuguese",
|
||||
"Cantonese",
|
||||
"Japanese",
|
||||
"German",
|
||||
"Swedish",
|
||||
"Korean",
|
||||
"Serbo-Croatian",
|
||||
"Serbian",
|
||||
"Croatian",
|
||||
"Bosnian",
|
||||
"Finnish",
|
||||
"Vietnamese",
|
||||
"Dutch",
|
||||
"Galician",
|
||||
"Catalan",
|
||||
"Polish",
|
||||
"Danish",
|
||||
"Norwegian Nynorsk",
|
||||
"Turkish",
|
||||
"Romanian",
|
||||
"Lithuanian",
|
||||
"Ido",
|
||||
"Old French",
|
||||
"Czech",
|
||||
"Norwegian",
|
||||
# Jèrriais -- same as Norman
|
||||
"Esperanto",
|
||||
"Icelandic",
|
||||
# Old Armenian
|
||||
"Norwegian Bokmål",
|
||||
"Asturian",
|
||||
"Hungarian",
|
||||
"Proto-Germanic",
|
||||
"Russian",
|
||||
"Slovene",
|
||||
"Min Nan",
|
||||
"Scottish Gaelic",
|
||||
"Greek",
|
||||
"Irish",
|
||||
"Lojban",
|
||||
"Middle French",
|
||||
"Malay",
|
||||
"Luxembourgish",
|
||||
"Slovak",
|
||||
"Estonian",
|
||||
"Persian",
|
||||
"Venetian",
|
||||
"Old English",
|
||||
"Volapük",
|
||||
"Ladin",
|
||||
"Faroese",
|
||||
"Scots",
|
||||
"Interlingua",
|
||||
"Romansch",
|
||||
"Urdu",
|
||||
# Middle Chinese
|
||||
"Indonesian",
|
||||
"Swahili",
|
||||
"Middle English",
|
||||
"Occitan",
|
||||
"Welsh",
|
||||
"Old Norse",
|
||||
"Albanian",
|
||||
"Old Irish",
|
||||
"Old Saxon",
|
||||
"Lower Sorbian",
|
||||
"Afrikaans",
|
||||
"Ukrainian",
|
||||
"Proto-Slavic",
|
||||
"Ancient Greek",
|
||||
"Gothic",
|
||||
"Hawaiian",
|
||||
"Kurdish",
|
||||
"Tagalog",
|
||||
"Old High German",
|
||||
"Crimean Tatar",
|
||||
"Manx",
|
||||
"Sanskrit",
|
||||
"Hiligaynon",
|
||||
"West Frisian",
|
||||
"Hebrew",
|
||||
"Tok Pisin",
|
||||
"Proto-Indo-European",
|
||||
"Macedonian",
|
||||
"Novial",
|
||||
"Armenian",
|
||||
"Arabic",
|
||||
"Maltese",
|
||||
"Hakka",
|
||||
"Sicilian",
|
||||
"Ladino",
|
||||
"Basque",
|
||||
"Breton",
|
||||
# Guernésiais -- same as Norman
|
||||
"Vai",
|
||||
"Navajo",
|
||||
"Azeri",
|
||||
"Vilamovian",
|
||||
# Tarantino
|
||||
"Maori",
|
||||
"Friulian",
|
||||
"Hausa",
|
||||
"Haitian Creole",
|
||||
"Yiddish",
|
||||
"Tatar",
|
||||
"Proto-Malayo-Polynesian",
|
||||
"Aromanian",
|
||||
"Ottoman Turkish",
|
||||
"Old Provençal",
|
||||
"Northern Sami",
|
||||
"Dalmatian",
|
||||
"Bulgarian",
|
||||
"Neapolitan",
|
||||
"Cornish",
|
||||
"Middle Dutch",
|
||||
"Rapa Nui",
|
||||
# Old Portuguese
|
||||
"Egyptian Arabic",
|
||||
"Romani",
|
||||
"Tahitian",
|
||||
"Thai",
|
||||
"Limburgish",
|
||||
"Karelian",
|
||||
"Tajik",
|
||||
"Turkmen",
|
||||
"Kabardian",
|
||||
"Uzbek",
|
||||
"Samoan",
|
||||
"Mongolian",
|
||||
"Zulu",
|
||||
"Upper Sorbian",
|
||||
"Walloon",
|
||||
# Proto-Finnic
|
||||
"Frankish",
|
||||
"Mapudungun",
|
||||
"Pashto",
|
||||
"Low German",
|
||||
"Bashkir",
|
||||
"Kashubian",
|
||||
"Sranan Tongo",
|
||||
"Proto-Sino-Tibetan",
|
||||
"Norman",
|
||||
"Proto-Austronesian",
|
||||
"Marathi",
|
||||
"Rohingya",
|
||||
"Classical Nahuatl",
|
||||
# Proto-Malayic
|
||||
# German Low German
|
||||
"Fijian",
|
||||
"Zazaki",
|
||||
"Proto-Italic",
|
||||
"Old Dutch",
|
||||
"Egyptian",
|
||||
"Old Frisian",
|
||||
"Greenlandic",
|
||||
"Burmese",
|
||||
"Votic",
|
||||
"Ewe",
|
||||
"Cherokee",
|
||||
"Old Church Slavonic",
|
||||
"Quechua",
|
||||
"Mirandese",
|
||||
"Livonian",
|
||||
"Bengali",
|
||||
"Skolt Sami",
|
||||
# Proto-Balto-Slavic
|
||||
"Pitjantjatjara",
|
||||
"Georgian",
|
||||
"North Frisian",
|
||||
"Tetum",
|
||||
"Tongan",
|
||||
# Mauritian Creole
|
||||
"Torres Strait Creole",
|
||||
"Papiamentu",
|
||||
"Lao",
|
||||
"Malagasy",
|
||||
"Interlingue",
|
||||
"Aragonese",
|
||||
"Istriot",
|
||||
"Sumerian",
|
||||
"Proto-Celtic",
|
||||
"Võro",
|
||||
# Proto-Polynesian
|
||||
"Nepali",
|
||||
"Chickasaw",
|
||||
"Akkadian",
|
||||
"Middle Armenian",
|
||||
"Cimbrian",
|
||||
"Somali",
|
||||
"Sardinian",
|
||||
"Tocharian B",
|
||||
"Telugu",
|
||||
"Javanese",
|
||||
"Taos",
|
||||
"Proto-Semitic",
|
||||
# Old Prussian
|
||||
"Kyrgyz",
|
||||
"Corsican",
|
||||
"Veps",
|
||||
"Baluchi",
|
||||
"Middle Low German",
|
||||
"Middle High German",
|
||||
"Uyghur",
|
||||
# Dutch Low Saxon
|
||||
"Belarusian",
|
||||
"Guaraní",
|
||||
"Undetermined",
|
||||
"Inuktitut",
|
||||
"Tocharian A",
|
||||
"Nigerian Pidgin",
|
||||
# Gallo
|
||||
# Saterland Frisian
|
||||
"Punjabi",
|
||||
"Proto-Algonquian",
|
||||
# Istro-Romanian
|
||||
"Wiradhuri",
|
||||
"Sichuan Yi",
|
||||
"Wu",
|
||||
# White Hmong
|
||||
"Ugaritic",
|
||||
"Sundanese",
|
||||
# Old East Slavic
|
||||
# Fala
|
||||
# Elfdalian
|
||||
"Tamil",
|
||||
"Pijin",
|
||||
"Okinawan",
|
||||
"Kazakh",
|
||||
"Hindi",
|
||||
"Tuvan",
|
||||
"Polabian",
|
||||
"Aramaic",
|
||||
"Malayalam",
|
||||
"Kumyk",
|
||||
"Inari Sami",
|
||||
"Ilocano",
|
||||
"Tswana",
|
||||
"Libyan Arabic",
|
||||
"Latgalian",
|
||||
"Yakut",
|
||||
"Sindhi",
|
||||
"Khmer",
|
||||
"Gamilaraay",
|
||||
"Ojibwe",
|
||||
"Choctaw",
|
||||
"Chinese",
|
||||
"Chamorro",
|
||||
"Yucatec Maya",
|
||||
"Picard",
|
||||
"Ngarrindjeri",
|
||||
"Kott",
|
||||
"Ingrian",
|
||||
# Crimean Gothic
|
||||
"Chamicuro",
|
||||
"Rajasthani",
|
||||
# Old Tupi
|
||||
"Old Spanish",
|
||||
"Gagauz",
|
||||
"Extremaduran",
|
||||
"Chinook Jargon",
|
||||
"Cahuilla",
|
||||
"Kannada",
|
||||
"Iban",
|
||||
"American Sign Language",
|
||||
"Adyghe",
|
||||
"Warlpiri",
|
||||
"Tibetan",
|
||||
"Ossetian",
|
||||
"Meriam",
|
||||
"Marshallese",
|
||||
"Khakas",
|
||||
"Balinese",
|
||||
"Zhuang",
|
||||
"Tuvaluan",
|
||||
"Niuean",
|
||||
"Martuthunira",
|
||||
"Guugu Yimidhirr",
|
||||
"Chechen",
|
||||
"Campidanese Sardinian",
|
||||
"Tolai",
|
||||
# Old Javanese
|
||||
"Nahuatl",
|
||||
"Lombard",
|
||||
"West Coast Bajau",
|
||||
"Romagnol",
|
||||
"Middle Irish",
|
||||
"Yoruba",
|
||||
"Wangaaybuwan-Ngiyambaa",
|
||||
# Old Swedish
|
||||
"Lingala",
|
||||
"Fiji Hindi",
|
||||
"Shabo",
|
||||
"Sasak",
|
||||
"Judeo-Arabic",
|
||||
"Central Kurdish",
|
||||
"Bislama",
|
||||
]
|
||||
|
||||
WIKT_LANGUAGE_NAMES['de'] = [
|
||||
"Deutsch",
|
||||
"Englisch",
|
||||
"Polnisch",
|
||||
"Italienisch",
|
||||
"Französisch",
|
||||
"Esperanto",
|
||||
"Schwedisch",
|
||||
"Lateinisch",
|
||||
"Tschechisch",
|
||||
"Katalanisch",
|
||||
"Spanisch",
|
||||
"Okzitanisch",
|
||||
"Ungarisch",
|
||||
"Litauisch",
|
||||
"Finnisch",
|
||||
"Russisch",
|
||||
"Altgriechisch",
|
||||
"Niederländisch",
|
||||
"Kurdisch",
|
||||
"Baskisch",
|
||||
"Armenisch",
|
||||
"Isländisch",
|
||||
"Bulgarisch",
|
||||
"Färöisch",
|
||||
"Dänisch",
|
||||
"Portugiesisch",
|
||||
"Slowakisch",
|
||||
"Türkisch",
|
||||
"Maori",
|
||||
"Albanisch",
|
||||
"Japanisch",
|
||||
"Norwegisch",
|
||||
"Irisch",
|
||||
"Koreanisch",
|
||||
"Chinesisch",
|
||||
"Venezianisch",
|
||||
"Friaulisch",
|
||||
"Serbisch",
|
||||
"Indonesisch",
|
||||
"Walisisch",
|
||||
"Arabisch",
|
||||
"Zentral-Nahuatl",
|
||||
"Neugriechisch",
|
||||
"Sumerisch",
|
||||
"Obersorbisch",
|
||||
"Sesotho",
|
||||
"Rumänisch",
|
||||
"Suaheli",
|
||||
"Persisch",
|
||||
"Krimtatarisch",
|
||||
"Plattdeutsch",
|
||||
"Prußisch",
|
||||
"Thai",
|
||||
"Bosnisch",
|
||||
"Sardisch",
|
||||
"Maltesisch",
|
||||
"Akkadisch",
|
||||
"Hawaiianisch",
|
||||
"Hebräisch",
|
||||
"Gotisch",
|
||||
"Afrikaans",
|
||||
"Rätoromanisch",
|
||||
"Tamil",
|
||||
"Bretonisch",
|
||||
"Ukrainisch",
|
||||
"Hindi",
|
||||
"Georgisch",
|
||||
"Panjabi",
|
||||
"Papiamentu",
|
||||
"Slowenisch",
|
||||
"Nauruisch",
|
||||
"Schottisch-Gälisch",
|
||||
"Balinesisch",
|
||||
"Estnisch",
|
||||
"Manx",
|
||||
"Korsisch",
|
||||
# "Frühneuhochdeutsch",
|
||||
"Lettisch",
|
||||
"isiZulu",
|
||||
"Tagalog",
|
||||
"Tok Pisin",
|
||||
# "Südpikenisch",
|
||||
"Kroatisch",
|
||||
"Niedersorbisch",
|
||||
"Kannada",
|
||||
"Guanche",
|
||||
"Weißrussisch",
|
||||
"Sanskrit",
|
||||
"Aserbaidschanisch",
|
||||
"Mittelhochdeutsch",
|
||||
"Laotisch",
|
||||
"Altnordisch",
|
||||
"Altenglisch",
|
||||
"Vietnamesisch",
|
||||
"Tadschikisch",
|
||||
"Samoanisch",
|
||||
"Mazedonisch",
|
||||
"Luxemburgisch",
|
||||
"Hethitisch",
|
||||
# "Yukatekisch",
|
||||
"Kaschubisch",
|
||||
"Wallonisch",
|
||||
# "Klassisches Nahuatl",
|
||||
"Telugu",
|
||||
"Rapanui",
|
||||
"Jiddisch",
|
||||
"Ido",
|
||||
# "Galicisch",
|
||||
"Volapük",
|
||||
"Bengalisch",
|
||||
"Mapudungun",
|
||||
"Lojban",
|
||||
"Tuvaluisch",
|
||||
"Gujarati",
|
||||
"Assamesisch",
|
||||
]
|
lib/langcodes/registry_parser.py (new file, 59 lines)
|
@ -0,0 +1,59 @@
|
|||
from langcodes.util import data_filename
|
||||
|
||||
LIST_KEYS = {'Description', 'Prefix'}
|
||||
|
||||
|
||||
def parse_file(file):
|
||||
"""
|
||||
Take an open file containing the IANA subtag registry, and yield a
|
||||
dictionary of information for each subtag it describes.
|
||||
"""
|
||||
lines = []
|
||||
for line in file:
|
||||
line = line.rstrip('\n')
|
||||
if line == '%%':
|
||||
# This is a separator between items. Parse the data we've
|
||||
# collected and yield the result.
|
||||
yield from parse_item(lines)
|
||||
lines.clear()
|
||||
elif line.startswith(' '):
|
||||
# This is a continuation line. Concatenate it to the previous
|
||||
# line, including one of the spaces.
|
||||
lines[-1] += line[1:]
|
||||
else:
|
||||
lines.append(line)
|
||||
yield from parse_item(lines)
|
||||
|
||||
|
||||
def parse_item(lines):
|
||||
"""
|
||||
Given the lines that form a subtag entry (after joining wrapped lines
|
||||
back together), parse the data they contain.
|
||||
|
||||
Returns a generator that yields once if there was any data there
|
||||
(and an empty generator if this was just the header).
|
||||
"""
|
||||
info = {}
|
||||
for line in lines:
|
||||
key, value = line.split(': ', 1)
|
||||
if key in LIST_KEYS:
|
||||
info.setdefault(key, []).append(value)
|
||||
else:
|
||||
assert key not in info
|
||||
info[key] = value
|
||||
|
||||
if 'Subtag' in info or 'Tag' in info:
|
||||
yield info
|
||||
|
||||
|
||||
def parse_registry():
|
||||
"""
|
||||
Yield a sequence of dictionaries, containing the info in the included
|
||||
IANA subtag registry file.
|
||||
"""
|
||||
with open(
|
||||
data_filename('language-subtag-registry.txt'), encoding='utf-8'
|
||||
) as data_file:
|
||||
# 'yield from' instead of returning, so that we only close the file
|
||||
# when finished.
|
||||
yield from parse_file(data_file)
|
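# NOTE (reviewer sketch, not part of the upstream file): each yielded dict
# holds the registry fields, with 'Description' and 'Prefix' collected into
# lists, e.g.
#
#   for entry in parse_registry():
#       if entry['Type'] == 'language':
#           print(entry['Subtag'], entry.get('Description', []))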
lib/langcodes/tag_parser.py (new file, 422 lines)
|
@ -0,0 +1,422 @@
|
|||
"""
|
||||
This module implements a parser for language tags, according to the RFC 5646
|
||||
(BCP 47) standard.
|
||||
|
||||
Here, we're only concerned with the syntax of the language tag. Looking up
|
||||
what they actually mean in a data file is a separate step.
|
||||
|
||||
For a full description of the syntax of a language tag, see page 3 of
|
||||
http://tools.ietf.org/html/bcp47
|
||||
|
||||
>>> parse_tag('en')
|
||||
[('language', 'en')]
|
||||
|
||||
>>> parse_tag('en_US')
|
||||
[('language', 'en'), ('territory', 'US')]
|
||||
|
||||
>>> parse_tag('en-Latn')
|
||||
[('language', 'en'), ('script', 'Latn')]
|
||||
|
||||
>>> parse_tag('es-419')
|
||||
[('language', 'es'), ('territory', '419')]
|
||||
|
||||
>>> parse_tag('zh-hant-tw')
|
||||
[('language', 'zh'), ('script', 'Hant'), ('territory', 'TW')]
|
||||
|
||||
>>> parse_tag('zh-tw-hant')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: This script subtag, 'hant', is out of place. Expected variant, extension, or end of string.
|
||||
|
||||
>>> parse_tag('de-DE-1901')
|
||||
[('language', 'de'), ('territory', 'DE'), ('variant', '1901')]
|
||||
|
||||
>>> parse_tag('ja-latn-hepburn')
|
||||
[('language', 'ja'), ('script', 'Latn'), ('variant', 'hepburn')]
|
||||
|
||||
>>> parse_tag('ja-hepburn-latn')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
|
||||
|
||||
>>> parse_tag('zh-yue')
|
||||
[('language', 'zh'), ('extlang', 'yue')]
|
||||
|
||||
>>> parse_tag('zh-yue-Hant')
|
||||
[('language', 'zh'), ('extlang', 'yue'), ('script', 'Hant')]
|
||||
|
||||
>>> parse_tag('zh-min-nan')
|
||||
[('grandfathered', 'zh-min-nan')]
|
||||
|
||||
>>> parse_tag('x-dothraki')
|
||||
[('language', 'x-dothraki')]
|
||||
|
||||
>>> parse_tag('en-u-co-backward-x-pig-latin')
|
||||
[('language', 'en'), ('extension', 'u-co-backward'), ('private', 'x-pig-latin')]
|
||||
|
||||
>>> parse_tag('en-x-pig-latin-u-co-backward')
|
||||
[('language', 'en'), ('private', 'x-pig-latin-u-co-backward')]
|
||||
|
||||
>>> parse_tag('u-co-backward')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Expected a language code, got 'u'
|
||||
|
||||
>>> parse_tag('x-')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Expected 1-8 alphanumeric characters, got ''
|
||||
|
||||
>>> parse_tag('und-u-')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Expected 1-8 alphanumeric characters, got ''
|
||||
|
||||
>>> parse_tag('und-0-foo')
|
||||
[('language', 'und'), ('extension', '0-foo')]
|
||||
|
||||
>>> parse_tag('und-?-foo')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Expected 1-8 alphanumeric characters, got '?'
|
||||
|
||||
>>> parse_tag('und-x-123456789')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Expected 1-8 alphanumeric characters, got '123456789'
|
||||
|
||||
>>> parse_tag('en-a-b-foo')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Tag extensions may not contain two singletons in a row
|
||||
|
||||
>>> parse_tag('ar-٠٠١')
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
langcodes.tag_parser.LanguageTagError: Language tags must be made of ASCII characters
|
||||
"""
|
||||
|
||||
# These tags should not be parsed by the usual parser; they're grandfathered
|
||||
# in from RFC 3066. The 'irregular' ones don't fit the syntax at all; the
|
||||
# 'regular' ones do, but would give meaningless results when parsed.
|
||||
#
|
||||
# These are all lowercased so they can be matched case-insensitively, as the
|
||||
# standard requires.
|
||||
EXCEPTIONS = {
|
||||
# Irregular exceptions
|
||||
"en-gb-oed",
|
||||
"i-ami",
|
||||
"i-bnn",
|
||||
"i-default",
|
||||
"i-enochian",
|
||||
"i-hak",
|
||||
"i-klingon",
|
||||
"i-lux",
|
||||
"i-mingo",
|
||||
"i-navajo",
|
||||
"i-pwn",
|
||||
"i-tao",
|
||||
"i-tay",
|
||||
"i-tsu",
|
||||
"sgn-be-fr",
|
||||
"sgn-be-nl",
|
||||
"sgn-ch-de",
|
||||
# Regular exceptions
|
||||
"art-lojban",
|
||||
"cel-gaulish",
|
||||
"no-bok",
|
||||
"no-nyn",
|
||||
"zh-guoyu",
|
||||
"zh-hakka",
|
||||
"zh-min",
|
||||
"zh-min-nan",
|
||||
"zh-xiang",
|
||||
}
|
||||
|
||||
# Define the order of subtags as integer constants, but also give them names
|
||||
# so we can describe them in error messages
|
||||
EXTLANG, SCRIPT, TERRITORY, VARIANT, EXTENSION = range(5)
|
||||
SUBTAG_TYPES = [
|
||||
'extlang',
|
||||
'script',
|
||||
'territory',
|
||||
'variant',
|
||||
'extension',
|
||||
'end of string',
|
||||
]
|
||||
|
||||
|
||||
def _is_ascii(s):
|
||||
"""
|
||||
Determine whether a tag consists of ASCII characters.
|
||||
"""
|
||||
# When Python 3.6 support is dropped, we can replace this with str.isascii().
|
||||
try:
|
||||
s.encode('ascii')
|
||||
return True
|
||||
except UnicodeEncodeError:
|
||||
return False
|
||||
|
||||
|
||||
def normalize_characters(tag):
|
||||
"""
|
||||
BCP 47 is case-insensitive, and CLDR's use of it considers underscores
|
||||
equivalent to hyphens. So here we smash tags into lowercase with hyphens,
|
||||
so we can make exact comparisons.
|
||||
|
||||
>>> normalize_characters('en_US')
|
||||
'en-us'
|
||||
>>> normalize_characters('zh-Hant_TW')
|
||||
'zh-hant-tw'
|
||||
"""
|
||||
return tag.lower().replace('_', '-')
|
||||
|
||||
|
||||
def parse_tag(tag):
|
||||
"""
|
||||
Parse the syntax of a language tag, without looking up anything in the
|
||||
registry, yet. Returns a list of (type, value) tuples indicating what
|
||||
information will need to be looked up.
|
||||
"""
|
||||
if not _is_ascii(tag):
|
||||
raise LanguageTagError("Language tags must be made of ASCII characters")
|
||||
|
||||
tag = normalize_characters(tag)
|
||||
if tag in EXCEPTIONS:
|
||||
return [('grandfathered', tag)]
|
||||
else:
|
||||
# The first subtag is always either the language code, or 'x' to mark
|
||||
# the entire tag as private-use. Other subtags are distinguished
|
||||
# by their length and format, but the language code is distinguished
|
||||
# by the fact that it is required to come first.
|
||||
subtags = tag.split('-')
|
||||
|
||||
# check all subtags for their shape: 1-8 alphanumeric characters
|
||||
for subtag in subtags:
|
||||
if len(subtag) < 1 or len(subtag) > 8 or not subtag.isalnum():
|
||||
raise LanguageTagError(
|
||||
f"Expected 1-8 alphanumeric characters, got {subtag!r}"
|
||||
)
|
||||
|
||||
if subtags[0] == 'x':
|
||||
if len(subtags) == 1:
|
||||
raise LanguageTagError("'x' is not a language tag on its own")
|
||||
# the entire language tag is private use, but we know that,
|
||||
# whatever it is, it fills the "language" slot
|
||||
return [('language', tag)]
|
||||
elif 2 <= len(subtags[0]) <= 4:
|
||||
# Language codes should be 2 or 3 letters, but 4-letter codes
|
||||
# are allowed to parse for legacy Unicode reasons
|
||||
return [('language', subtags[0])] + parse_subtags(subtags[1:])
|
||||
else:
|
||||
subtag_error(subtags[0], 'a language code')
|
||||
|
||||
|
||||
def parse_subtags(subtags, expect=EXTLANG):
|
||||
"""
|
||||
Parse everything that comes after the language tag: scripts, territories,
|
||||
variants, and assorted extensions.
|
||||
"""
|
||||
# We parse the parts of a language code recursively: each step of
|
||||
# language code parsing handles one component of the code, recurses
|
||||
# to handle the rest of the code, and adds what it found onto the
|
||||
# list of things that were in the rest of the code.
|
||||
#
|
||||
# This could just as well have been iterative, but the loops would have
|
||||
# been convoluted.
|
||||
#
|
||||
# So here's the base case.
|
||||
if not subtags:
|
||||
return []
|
||||
|
||||
# There's a subtag that comes next. We need to find out what it is.
|
||||
#
|
||||
# The primary thing that distinguishes different types of subtags is
|
||||
# length, but the subtags also come in a specified order. The 'expect'
|
||||
# parameter keeps track of where we are in that order. expect=TERRITORY,
|
||||
# for example, means we're expecting a territory code, or anything later
|
||||
# (because everything but the language is optional).
|
||||
subtag = subtags[0]
|
||||
tag_length = len(subtag)
|
||||
|
||||
# In the usual case, our goal is to recognize what kind of tag this is,
|
||||
# and set it in 'tagtype' -- as an integer, so we can compare where it
|
||||
# should go in order. You can see the enumerated list of tagtypes above,
|
||||
# where the SUBTAG_TYPES global is defined.
|
||||
tagtype = None
|
||||
|
||||
if tag_length == 1:
|
||||
# A one-letter subtag introduces an extension, which can itself have
|
||||
# sub-subtags, so we dispatch to a different function at this point.
|
||||
#
|
||||
# We don't need to check anything about the order, because extensions
|
||||
# necessarily come last.
|
||||
if subtag.isalnum():
|
||||
return parse_extension(subtags)
|
||||
else:
|
||||
subtag_error(subtag)
|
||||
|
||||
elif tag_length == 2:
|
||||
if subtag.isalpha():
|
||||
# Two-letter alphabetic subtags are territories. These are the only
|
||||
# two-character subtags after the language.
|
||||
tagtype = TERRITORY
|
||||
|
||||
elif tag_length == 3:
|
||||
if subtag.isalpha():
|
||||
# Three-letter alphabetic subtags are 'extended languages'.
|
||||
# It's allowed for there to be up to three of them in a row, so we
|
||||
# need another function to enforce that. Before we dispatch to that
|
||||
# function, though, we need to check whether we're in the right
|
||||
# place in order.
|
||||
if expect <= EXTLANG:
|
||||
return parse_extlang(subtags)
|
||||
else:
|
||||
order_error(subtag, EXTLANG, expect)
|
||||
elif subtag.isdigit():
|
||||
# Three-digit subtags are territories representing broad regions,
|
||||
# such as Latin America (419).
|
||||
tagtype = TERRITORY
|
||||
|
||||
elif tag_length == 4:
|
||||
if subtag.isalpha():
|
||||
# Four-letter alphabetic subtags are scripts.
|
||||
tagtype = SCRIPT
|
||||
elif subtag[0].isdigit():
|
||||
# Four-character subtags that start with a digit are variants.
|
||||
tagtype = VARIANT
|
||||
|
||||
else:
|
||||
# Tags of length 5-8 are variants.
|
||||
tagtype = VARIANT
|
||||
|
||||
# That's the end of the big elif block for figuring out what kind of
|
||||
# subtag we have based on its length. Now we should do something with that
|
||||
# kind of subtag.
|
||||
|
||||
if tagtype is None:
|
||||
# We haven't recognized a type of tag. This subtag just doesn't fit the
|
||||
# standard.
|
||||
subtag_error(subtag)
|
||||
|
||||
elif tagtype < expect:
|
||||
# We got a tag type that was supposed to appear earlier in the order.
|
||||
order_error(subtag, tagtype, expect)
|
||||
|
||||
else:
|
||||
# We've recognized a subtag of a particular type. If it's a territory or
|
||||
# script, we expect the next subtag to be a strictly later type, because
|
||||
# there can be at most one territory and one script. Otherwise, we expect
|
||||
# the next subtag to be the type we got or later.
|
||||
|
||||
if tagtype in (SCRIPT, TERRITORY):
|
||||
expect = tagtype + 1
|
||||
else:
|
||||
expect = tagtype
|
||||
|
||||
# Get the name of this subtag type instead of its integer value.
|
||||
typename = SUBTAG_TYPES[tagtype]
|
||||
|
||||
# Some subtags are conventionally written with capitalization. Apply
|
||||
# those conventions.
|
||||
if tagtype == SCRIPT:
|
||||
subtag = subtag.title()
|
||||
elif tagtype == TERRITORY:
|
||||
subtag = subtag.upper()
|
||||
|
||||
# Recurse on the remaining subtags.
|
||||
return [(typename, subtag)] + parse_subtags(subtags[1:], expect)
|
||||
|
||||
|
||||
def parse_extlang(subtags):
|
||||
"""
|
||||
Parse an 'extended language' tag, which consists of 1 to 3 three-letter
|
||||
language codes.
|
||||
|
||||
Extended languages are used for distinguishing dialects/sublanguages
|
||||
(depending on your view) of macrolanguages such as Arabic, Bahasa Malay,
|
||||
and Chinese.
|
||||
|
||||
It's supposed to also be acceptable to just use the sublanguage as the
|
||||
primary language code, and your code should know what's a macrolanguage of
|
||||
what. For example, 'zh-yue' and 'yue' are the same language (Cantonese),
|
||||
and differ only in whether they explicitly spell out that Cantonese is a
|
||||
kind of Chinese.
|
||||
"""
|
||||
index = 0
|
||||
parsed = []
|
||||
while index < len(subtags) and len(subtags[index]) == 3 and index < 3:
|
||||
parsed.append(('extlang', subtags[index]))
|
||||
index += 1
|
||||
return parsed + parse_subtags(subtags[index:], SCRIPT)
|
||||
|
||||
|
||||
def parse_extension(subtags):
|
||||
"""
|
||||
An extension tag consists of a 'singleton' -- a one-character subtag --
|
||||
followed by other subtags. Extension tags are in the BCP 47 syntax, but
|
||||
their meaning is outside the scope of the standard.
|
||||
|
||||
For example, there's the u- extension, which is used for setting Unicode
|
||||
properties in some context I'm not aware of.
|
||||
|
||||
If the singleton is 'x', it's a private use extension, and consumes the
|
||||
rest of the tag. Otherwise, it stops at the next singleton.
|
||||
"""
|
||||
subtag = subtags[0]
|
||||
if len(subtags) == 1:
|
||||
raise LanguageTagError(f"The subtag {subtag!r} must be followed by something")
|
||||
|
||||
if subtag == 'x':
|
||||
# Private use. Everything after this is arbitrary codes that we
|
||||
# can't look up.
|
||||
return [('private', '-'.join(subtags))]
|
||||
|
||||
else:
|
||||
# Look for the next singleton, if there is one.
|
||||
boundary = 1
|
||||
while boundary < len(subtags) and len(subtags[boundary]) != 1:
|
||||
boundary += 1
|
||||
|
||||
if boundary == 1:
|
||||
raise LanguageTagError(
|
||||
"Tag extensions may not contain two singletons in a row"
|
||||
)
|
||||
# We've parsed a complete extension subtag. Return to the main
|
||||
# parse_subtags function, but expect to find nothing but more
|
||||
# extensions at this point.
|
||||
return [('extension', '-'.join(subtags[:boundary]))] + parse_subtags(
|
||||
subtags[boundary:], EXTENSION
|
||||
)
|
||||
|
||||
|
||||
class LanguageTagError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
def order_error(subtag, got, expected):
|
||||
"""
|
||||
Output an error indicating that tags were out of order.
|
||||
"""
|
||||
options = SUBTAG_TYPES[expected:]
|
||||
if len(options) == 1:
|
||||
expect_str = options[0]
|
||||
elif len(options) == 2:
|
||||
expect_str = f'{options[0]} or {options[1]}'
|
||||
else:
|
||||
joined = ', '.join(options[:-1])
|
||||
last = options[-1]
|
||||
expect_str = f'{joined}, or {last}'
|
||||
got_str = SUBTAG_TYPES[got]
|
||||
raise LanguageTagError(
|
||||
f"This {got_str} subtag, {subtag!r}, is out of place. Expected {expect_str}."
|
||||
)
|
||||
|
||||
|
||||
def subtag_error(subtag, expected='a valid subtag'):
|
||||
"""
|
||||
Try to output a reasonably helpful error message based on our state of
|
||||
parsing. Most of this code is about how to list, in English, the kinds
|
||||
of things we were expecting to find.
|
||||
"""
|
||||
raise LanguageTagError(f"Expected {expected}, got {subtag!r}")
|
lib/langcodes/util.py (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
from pkg_resources import resource_filename
|
||||
|
||||
import os
|
||||
DATA_ROOT = resource_filename('langcodes', 'data')
|
||||
|
||||
|
||||
def data_filename(filename):
|
||||
return os.path.join(DATA_ROOT, filename)
|
lib/language_data/__init__.py (new empty file)
lib/language_data/data/extra_language_names.csv (new file, 68 lines)
|
@ -0,0 +1,68 @@
|
|||
en,av,Avar
|
||||
en,frr,North Frisian
|
||||
en,frs,East Frisian
|
||||
en,fy,West Frisian
|
||||
en,gn,Guaraní
|
||||
en,ilo,Ilocano
|
||||
en,jam,Jamaican Creole
|
||||
en,kky,Guugu Yimidhirr
|
||||
en,kky,Guugu Yimithirr
|
||||
en,ksd,Tolai
|
||||
en,liv,Livonian
|
||||
en,nay,Ngarrindjeri
|
||||
en,nmn,ǃXóõ
|
||||
en,nrf,Norman
|
||||
en,oj,Ojibwe
|
||||
en,pap,Papiamentu
|
||||
en,pms,Piedmontese
|
||||
en,rap,Rapa Nui
|
||||
en,rm,Romansch
|
||||
en,rom,Romani
|
||||
en,ryu,Okinawan
|
||||
en,sl,Slovene
|
||||
en,st,Sesotho
|
||||
en,tvl,Tuvaluan
|
||||
en,twf,Taos
|
||||
en,txb,Tocharian B
|
||||
en,tyv,Tuvan
|
||||
en,vma,Martuthunira
|
||||
en,wym,Vilamovian
|
||||
en,xto,Tocharian A
|
||||
en,zu,isiZulu
|
||||
de,el,Neugriechisch
|
||||
de,la,Lateinisch
|
||||
de,fur,Friaulisch
|
||||
de,gd,Schottisch-Gälisch
|
||||
de,haw,Hawaiianisch
|
||||
de,nds,Plattdeutsch
|
||||
de,nhn,Zentral-Nahuatl
|
||||
de,pa,Panjabi
|
||||
de,pap,Papiamentu
|
||||
de,prg,Prußisch
|
||||
de,vec,Venezianisch
|
||||
de,tvl,Tuvaluisch
|
||||
sh,sh,Srpskohrvatski
|
||||
la,la,Lingua latina
|
||||
ceb,ceb,Sinugbuanong Binisayâ
|
||||
ceb,ceb,Bisayâ
|
||||
ceb,ceb,Bisaya
|
||||
lah,lah,لہندا پنجابی
|
||||
bho,bho,भोजपुरी
|
||||
ang,ang,Ænglisc
|
||||
vo,vo,Volapük
|
||||
io,io,Ido
|
||||
jbo,jbo,lojban
|
||||
jbo,jbo,lojbau
|
||||
rup,rup,armãneashti
|
||||
nv,nv,Diné bizaad
|
||||
zh-Hant,nan,閩南語
|
||||
zh-Hans,nan,闽南语
|
||||
nan-Latn,nan,Bân-lâm-gú
|
||||
zh-Hant,hak,客家語
|
||||
zh-Hans,hak,客家语
|
||||
ilo,ilo,Ilokano
|
||||
hil,hil,Ilonggo
|
||||
nah,nah,Nāhuatl
|
||||
tpi,tpi,Tok Pisin
|
||||
ve,ve,tshiVenḓa
|
||||
kcm,kcm,Kristang
|
|
lib/language_data/data/language-subtag-registry.txt (new file, 48462 lines)
lib/language_data/data/languageInfo.xml (new file, 442 lines)
|
@ -0,0 +1,442 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE supplementalData SYSTEM "../../common/dtd/ldmlSupplemental.dtd">
<!--
Copyright © 1991-2020 Unicode, Inc.
CLDR data files are interpreted according to the LDML specification (http://unicode.org/reports/tr35/)
For terms of use, see http://www.unicode.org/copyright.html
-->
<supplementalData>
  <version number="$Revision$"/>
  <languageMatching>
    <languageMatches type="written_new">
      <paradigmLocales locales="en en_GB es es_419 pt_BR pt_PT"/>
      <matchVariable id="$enUS" value="AS+GU+MH+MP+PR+UM+US+VI"/>
      <matchVariable id="$cnsar" value="HK+MO"/>
      <matchVariable id="$americas" value="019"/>
      <matchVariable id="$maghreb" value="MA+DZ+TN+LY+MR+EH"/>
      <languageMatch desired="no" supported="nb" distance="1"/> <!-- no ⇒ nb -->
      <!-- languageMatch desired="ku" supported="ckb" distance="4" oneway="true"/ --> <!-- ku ⇒ ckb -->
      <!-- languageMatch desired="ckb" supported="ku" percent="8" oneway="true"/ --> <!-- ckb ⇒ ku -->
      <languageMatch desired="hr" supported="bs" distance="4"/> <!-- hr ⇒ bs -->
      <languageMatch desired="sh" supported="bs" distance="4"/> <!-- sh ⇒ bs -->
      <!-- languageMatch desired="sr" supported="bs" distance="4"/--> <!-- sr ⇒ bs -->
      <languageMatch desired="sh" supported="hr" distance="4"/> <!-- sh ⇒ hr -->
      <!-- languageMatch desired="sr" supported="hr" distance="4"/--> <!-- sr ⇒ hr -->
      <languageMatch desired="sh" supported="sr" distance="4"/> <!-- sh ⇒ sr -->
      <languageMatch desired="ssy" supported="aa" distance="4"/> <!-- ssy ⇒ aa -->
      <languageMatch desired="gsw" supported="de" distance="4" oneway="true"/> <!-- gsw ⇒ de -->
      <languageMatch desired="lb" supported="de" distance="4" oneway="true"/> <!-- lb ⇒ de -->
      <languageMatch desired="da" supported="no" distance="8"/> <!-- da ⇒ no -->
      <languageMatch desired="da" supported="nb" distance="8"/> <!-- da ⇒ nb -->
      <!-- various fallbacks for more or less loosely related languages -->
      <!-- CLDR-13528:
           Distance 20 for some linguistic relation (e.g., Creoles to French)
           or a local language in the area of another (e.g., Breton to French).
           Distance 30 for fallbacks to prevalent second languages,
           and in the absence of better information. -->
      <languageMatch desired="ab" supported="ru" distance="30" oneway="true"/> <!-- Abkhazian: ab ⇒ ru -->
      <languageMatch desired="ach" supported="en" distance="30" oneway="true"/> <!-- Acoli (Southern Luo dialect in Uganda): ach ⇒ en -->
      <languageMatch desired="af" supported="nl" distance="20" oneway="true"/> <!-- Afrikaans: af ⇒ nl -->
      <languageMatch desired="ak" supported="en" distance="30" oneway="true"/> <!-- Akan: ak ⇒ en -->
      <languageMatch desired="ay" supported="es" distance="20" oneway="true"/> <!-- Aymara: ay ⇒ es -->
      <languageMatch desired="az" supported="ru" distance="30" oneway="true"/> <!-- Azerbaijani: az ⇒ ru -->
      <languageMatch desired="be" supported="ru" distance="20" oneway="true"/> <!-- Belarusian: be ⇒ ru -->
      <languageMatch desired="bem" supported="en" distance="30" oneway="true"/> <!-- Bemba (Zambia): bem ⇒ en -->
      <languageMatch desired="bh" supported="hi" distance="30" oneway="true"/> <!-- Bihari languages (gets canonicalized to bho): bh ⇒ hi -->
      <languageMatch desired="bn" supported="en" distance="30" oneway="true"/> <!-- Bangla: bn ⇒ en -->
      <languageMatch desired="br" supported="fr" distance="20" oneway="true"/> <!-- Breton: br ⇒ fr -->
      <languageMatch desired="ceb" supported="fil" distance="30" oneway="true"/> <!-- Cebuano: ceb ⇒ fil -->
      <languageMatch desired="chr" supported="en" distance="20" oneway="true"/> <!-- Cherokee: chr ⇒ en -->
      <languageMatch desired="ckb" supported="ar" distance="30" oneway="true"/> <!-- Sorani Kurdish: ckb ⇒ ar -->
      <languageMatch desired="co" supported="fr" distance="20" oneway="true"/> <!-- Corsican: co ⇒ fr -->
      <languageMatch desired="crs" supported="fr" distance="20" oneway="true"/> <!-- Seselwa Creole French: crs ⇒ fr -->
      <languageMatch desired="cy" supported="en" distance="20" oneway="true"/> <!-- Welsh: cy ⇒ en -->
      <languageMatch desired="ee" supported="en" distance="30" oneway="true"/> <!-- Ewe: ee ⇒ en -->
      <languageMatch desired="eo" supported="en" distance="30" oneway="true"/> <!-- Esperanto: eo ⇒ en -->

      <!-- CLDR-13650: No fallback for Estonian -->
      <!-- languageMatch desired="et" supported="fi" distance="30" oneway="true"/--> <!-- Estonian: et ⇒ fi -->

      <languageMatch desired="eu" supported="es" distance="20" oneway="true"/> <!-- Basque: eu ⇒ es -->
      <languageMatch desired="fo" supported="da" distance="20" oneway="true"/> <!-- Faroese: fo ⇒ da -->
      <languageMatch desired="fy" supported="nl" distance="20" oneway="true"/> <!-- Western Frisian: fy ⇒ nl -->
      <languageMatch desired="ga" supported="en" distance="20" oneway="true"/> <!-- Irish: ga ⇒ en -->
      <languageMatch desired="gaa" supported="en" distance="30" oneway="true"/> <!-- Ga: gaa ⇒ en -->
      <languageMatch desired="gd" supported="en" distance="20" oneway="true"/> <!-- Scottish Gaelic: gd ⇒ en -->
      <languageMatch desired="gl" supported="es" distance="20" oneway="true"/> <!-- Galician: gl ⇒ es -->
      <languageMatch desired="gn" supported="es" distance="20" oneway="true"/> <!-- Guarani: gn ⇒ es -->
      <languageMatch desired="gu" supported="hi" distance="30" oneway="true"/> <!-- Gujarati: gu ⇒ hi -->
      <languageMatch desired="ha" supported="en" distance="30" oneway="true"/> <!-- Hausa: ha ⇒ en -->
      <languageMatch desired="haw" supported="en" distance="20" oneway="true"/> <!-- Hawaiian: haw ⇒ en -->
      <languageMatch desired="ht" supported="fr" distance="20" oneway="true"/> <!-- Haitian Creole: ht ⇒ fr -->
      <languageMatch desired="hy" supported="ru" distance="30" oneway="true"/> <!-- Armenian: hy ⇒ ru -->
      <languageMatch desired="ia" supported="en" distance="30" oneway="true"/> <!-- Interlingua: ia ⇒ en -->
      <languageMatch desired="ig" supported="en" distance="30" oneway="true"/> <!-- Igbo: ig ⇒ en -->
      <languageMatch desired="is" supported="en" distance="20" oneway="true"/> <!-- Icelandic: is ⇒ en -->
      <languageMatch desired="jv" supported="id" distance="20" oneway="true"/> <!-- Javanese: jv ⇒ id -->
      <languageMatch desired="ka" supported="en" distance="30" oneway="true"/> <!-- Georgian: ka ⇒ en -->
      <languageMatch desired="kg" supported="fr" distance="30" oneway="true"/> <!-- Kongo: kg ⇒ fr -->
      <languageMatch desired="kk" supported="ru" distance="30" oneway="true"/> <!-- Kazakh: kk ⇒ ru -->
      <languageMatch desired="km" supported="en" distance="30" oneway="true"/> <!-- Khmer: km ⇒ en -->
      <languageMatch desired="kn" supported="en" distance="30" oneway="true"/> <!-- Kannada: kn ⇒ en -->
      <languageMatch desired="kri" supported="en" distance="30" oneway="true"/> <!-- Krio: kri ⇒ en -->
      <languageMatch desired="ku" supported="tr" distance="30" oneway="true"/> <!-- Kurdish: ku ⇒ tr -->
      <languageMatch desired="ky" supported="ru" distance="30" oneway="true"/> <!-- Kirghiz: ky ⇒ ru -->
      <languageMatch desired="la" supported="it" distance="20" oneway="true"/> <!-- Latin: la ⇒ it -->
      <languageMatch desired="lg" supported="en" distance="30" oneway="true"/> <!-- Luganda: lg ⇒ en -->
      <languageMatch desired="ln" supported="fr" distance="30" oneway="true"/> <!-- Lingala: ln ⇒ fr -->
      <languageMatch desired="lo" supported="en" distance="30" oneway="true"/> <!-- Lao: lo ⇒ en -->
      <languageMatch desired="loz" supported="en" distance="30" oneway="true"/> <!-- Lozi: loz ⇒ en -->
      <languageMatch desired="lua" supported="fr" distance="30" oneway="true"/> <!-- Luba-Lulua: lua ⇒ fr -->
      <languageMatch desired="mfe" supported="en" distance="30" oneway="true"/> <!-- Morisyen: mfe ⇒ en -->
      <languageMatch desired="mg" supported="fr" distance="30" oneway="true"/> <!-- Malagasy: mg ⇒ fr -->
      <languageMatch desired="mi" supported="en" distance="20" oneway="true"/> <!-- Maori: mi ⇒ en -->

      <!-- CLDR-13625: Macedonian should not fall back to Bulgarian -->
      <!-- languageMatch desired="mk" supported="bg" distance="30" oneway="true"/--> <!-- Macedonian: mk ⇒ bg -->

      <languageMatch desired="ml" supported="en" distance="30" oneway="true"/> <!-- Malayalam: ml ⇒ en -->
      <languageMatch desired="mn" supported="ru" distance="30" oneway="true"/> <!-- Mongolian: mn ⇒ ru -->
      <languageMatch desired="mr" supported="hi" distance="30" oneway="true"/> <!-- Marathi: mr ⇒ hi -->
      <languageMatch desired="ms" supported="id" distance="30" oneway="true"/> <!-- Malay: ms ⇒ id -->
      <languageMatch desired="mt" supported="en" distance="30" oneway="true"/> <!-- Maltese: mt ⇒ en -->
      <languageMatch desired="my" supported="en" distance="30" oneway="true"/> <!-- Myanmar: my ⇒ en -->
      <languageMatch desired="ne" supported="en" distance="30" oneway="true"/> <!-- Nepali: ne ⇒ en -->
      <languageMatch desired="nn" supported="nb" distance="20"/> <!-- Nynorsk: nn ⟺ nb -->
      <languageMatch desired="nn" supported="no" distance="20"/> <!-- Nynorsk: nn ⟺ no; CLDR-13679 -->
      <languageMatch desired="nso" supported="en" distance="30" oneway="true"/> <!-- Northern Sotho: nso ⇒ en -->
      <languageMatch desired="ny" supported="en" distance="30" oneway="true"/> <!-- Nyanja: ny ⇒ en -->
      <languageMatch desired="nyn" supported="en" distance="30" oneway="true"/> <!-- Nyankole: nyn ⇒ en -->
      <languageMatch desired="oc" supported="fr" distance="20" oneway="true"/> <!-- Occitan: oc ⇒ fr -->
      <languageMatch desired="om" supported="en" distance="30" oneway="true"/> <!-- Oromo: om ⇒ en -->
      <languageMatch desired="or" supported="en" distance="30" oneway="true"/> <!-- Odia: or ⇒ en -->
      <languageMatch desired="pa" supported="en" distance="30" oneway="true"/> <!-- Punjabi: pa ⇒ en -->
      <languageMatch desired="pcm" supported="en" distance="20" oneway="true"/> <!-- Nigerian Pidgin: pcm ⇒ en -->
      <languageMatch desired="ps" supported="en" distance="30" oneway="true"/> <!-- Pashto: ps ⇒ en -->
      <languageMatch desired="qu" supported="es" distance="30" oneway="true"/> <!-- Quechua: qu ⇒ es -->
      <languageMatch desired="rm" supported="de" distance="20" oneway="true"/> <!-- Romansh: rm ⇒ de -->
      <languageMatch desired="rn" supported="en" distance="30" oneway="true"/> <!-- Rundi: rn ⇒ en -->
      <languageMatch desired="rw" supported="fr" distance="30" oneway="true"/> <!-- Kinyarwanda: rw ⇒ fr -->
      <languageMatch desired="sa" supported="hi" distance="30" oneway="true"/> <!-- Sanskrit: sa ⇒ hi -->
      <languageMatch desired="sd" supported="en" distance="30" oneway="true"/> <!-- Sindhi: sd ⇒ en -->
      <languageMatch desired="si" supported="en" distance="30" oneway="true"/> <!-- Sinhalese: si ⇒ en -->
      <languageMatch desired="sn" supported="en" distance="30" oneway="true"/> <!-- Shona: sn ⇒ en -->
      <languageMatch desired="so" supported="en" distance="30" oneway="true"/> <!-- Somali: so ⇒ en -->
      <languageMatch desired="sq" supported="en" distance="30" oneway="true"/> <!-- Albanian: sq ⇒ en -->
      <languageMatch desired="st" supported="en" distance="30" oneway="true"/> <!-- Southern Sotho: st ⇒ en -->
      <languageMatch desired="su" supported="id" distance="20" oneway="true"/> <!-- Sundanese: su ⇒ id -->
      <languageMatch desired="sw" supported="en" distance="30" oneway="true"/> <!-- Swahili: sw ⇒ en -->
      <languageMatch desired="ta" supported="en" distance="30" oneway="true"/> <!-- Tamil: ta ⇒ en -->
      <languageMatch desired="te" supported="en" distance="30" oneway="true"/> <!-- Telugu: te ⇒ en -->
      <languageMatch desired="tg" supported="ru" distance="30" oneway="true"/> <!-- Tajik: tg ⇒ ru -->
      <languageMatch desired="ti" supported="en" distance="30" oneway="true"/> <!-- Tigrinya: ti ⇒ en -->
      <languageMatch desired="tk" supported="ru" distance="30" oneway="true"/> <!-- Turkmen: tk ⇒ ru -->
      <languageMatch desired="tlh" supported="en" distance="30" oneway="true"/> <!-- Klingon: tlh ⇒ en -->
      <languageMatch desired="tn" supported="en" distance="30" oneway="true"/> <!-- Tswana: tn ⇒ en -->
      <languageMatch desired="to" supported="en" distance="30" oneway="true"/> <!-- Tonga: to ⇒ en -->
      <languageMatch desired="tt" supported="ru" distance="30" oneway="true"/> <!-- Tatar: tt ⇒ ru -->
      <languageMatch desired="tum" supported="en" distance="30" oneway="true"/> <!-- Tumbuka: tum ⇒ en -->
      <languageMatch desired="ug" supported="zh" distance="20" oneway="true"/> <!-- Uighur: ug ⇒ zh -->
      <languageMatch desired="ur" supported="en" distance="30" oneway="true"/> <!-- Urdu: ur ⇒ en -->
      <languageMatch desired="uz" supported="ru" distance="30" oneway="true"/> <!-- Uzbek: uz ⇒ ru -->
      <languageMatch desired="wo" supported="fr" distance="30" oneway="true"/> <!-- Wolof: wo ⇒ fr -->
      <languageMatch desired="xh" supported="en" distance="30" oneway="true"/> <!-- Xhosa: xh ⇒ en -->
      <languageMatch desired="yi" supported="en" distance="30" oneway="true"/> <!-- Yiddish: yi ⇒ en -->
      <languageMatch desired="yo" supported="en" distance="30" oneway="true"/> <!-- Yoruba: yo ⇒ en -->
      <languageMatch desired="zu" supported="en" distance="30" oneway="true"/> <!-- Zulu: zu ⇒ en -->

      <!-- START generated by GenerateLanguageMatches.java: don't manually change -->
      <!-- Encompassed by Arabic -->
      <languageMatch desired="aao" supported="ar" distance="10" oneway="true"/> <!-- Algerian Saharan Arabic -->
      <languageMatch desired="abh" supported="ar" distance="10" oneway="true"/> <!-- Tajiki Arabic -->
      <languageMatch desired="abv" supported="ar" distance="10" oneway="true"/> <!-- Baharna Arabic -->
      <languageMatch desired="acm" supported="ar" distance="10" oneway="true"/> <!-- Mesopotamian Arabic -->
      <languageMatch desired="acq" supported="ar" distance="10" oneway="true"/> <!-- Ta'izzi-Adeni Arabic -->
      <languageMatch desired="acw" supported="ar" distance="10" oneway="true"/> <!-- Hijazi Arabic -->
      <languageMatch desired="acx" supported="ar" distance="10" oneway="true"/> <!-- Omani Arabic -->
      <languageMatch desired="acy" supported="ar" distance="10" oneway="true"/> <!-- Cypriot Arabic -->
      <languageMatch desired="adf" supported="ar" distance="10" oneway="true"/> <!-- Dhofari Arabic -->
      <languageMatch desired="aeb" supported="ar" distance="10" oneway="true"/> <!-- Tunisian Arabic -->
      <languageMatch desired="aec" supported="ar" distance="10" oneway="true"/> <!-- Saidi Arabic -->
      <languageMatch desired="afb" supported="ar" distance="10" oneway="true"/> <!-- Gulf Arabic -->
      <languageMatch desired="ajp" supported="ar" distance="10" oneway="true"/> <!-- South Levantine Arabic -->
      <languageMatch desired="apc" supported="ar" distance="10" oneway="true"/> <!-- North Levantine Arabic -->
      <languageMatch desired="apd" supported="ar" distance="10" oneway="true"/> <!-- Sudanese Arabic -->
      <languageMatch desired="arq" supported="ar" distance="10" oneway="true"/> <!-- Algerian Arabic -->
      <languageMatch desired="ars" supported="ar" distance="10" oneway="true"/> <!-- Najdi Arabic -->
      <languageMatch desired="ary" supported="ar" distance="10" oneway="true"/> <!-- Moroccan Arabic -->
      <languageMatch desired="arz" supported="ar" distance="10" oneway="true"/> <!-- Egyptian Arabic -->
      <languageMatch desired="auz" supported="ar" distance="10" oneway="true"/> <!-- Uzbeki Arabic -->
      <languageMatch desired="avl" supported="ar" distance="10" oneway="true"/> <!-- Eastern Egyptian Bedawi Arabic -->
      <languageMatch desired="ayh" supported="ar" distance="10" oneway="true"/> <!-- Hadrami Arabic -->
      <languageMatch desired="ayl" supported="ar" distance="10" oneway="true"/> <!-- Libyan Arabic -->
      <languageMatch desired="ayn" supported="ar" distance="10" oneway="true"/> <!-- Sanaani Arabic -->
      <languageMatch desired="ayp" supported="ar" distance="10" oneway="true"/> <!-- North Mesopotamian Arabic -->
      <languageMatch desired="bbz" supported="ar" distance="10" oneway="true"/> <!-- Babalia Creole Arabic -->
      <languageMatch desired="pga" supported="ar" distance="10" oneway="true"/> <!-- Sudanese Creole Arabic -->
      <languageMatch desired="shu" supported="ar" distance="10" oneway="true"/> <!-- Chadian Arabic -->
      <languageMatch desired="ssh" supported="ar" distance="10" oneway="true"/> <!-- Shihhi Arabic -->
      <!-- Encompassed by Azerbaijani -->
      <languageMatch desired="azb" supported="az" distance="10" oneway="true"/> <!-- South Azerbaijani -->
      <!-- Encompassed by Estonian -->
      <languageMatch desired="vro" supported="et" distance="10" oneway="true"/> <!-- Võro -->
      <!-- Encompassed by Fulah -->
      <languageMatch desired="ffm" supported="ff" distance="10" oneway="true"/> <!-- Maasina Fulfulde -->
      <languageMatch desired="fub" supported="ff" distance="10" oneway="true"/> <!-- Adamawa Fulfulde -->
      <languageMatch desired="fue" supported="ff" distance="10" oneway="true"/> <!-- Borgu Fulfulde -->
      <languageMatch desired="fuf" supported="ff" distance="10" oneway="true"/> <!-- Pular -->
      <languageMatch desired="fuh" supported="ff" distance="10" oneway="true"/> <!-- Western Niger Fulfulde -->
      <languageMatch desired="fui" supported="ff" distance="10" oneway="true"/> <!-- Bagirmi Fulfulde -->
      <languageMatch desired="fuq" supported="ff" distance="10" oneway="true"/> <!-- Central-Eastern Niger Fulfulde -->
      <languageMatch desired="fuv" supported="ff" distance="10" oneway="true"/> <!-- Nigerian Fulfulde -->
      <!-- Encompassed by Guarani -->
      <languageMatch desired="gnw" supported="gn" distance="10" oneway="true"/> <!-- Western Bolivian Guaraní -->
      <languageMatch desired="gui" supported="gn" distance="10" oneway="true"/> <!-- Eastern Bolivian Guaraní -->
      <languageMatch desired="gun" supported="gn" distance="10" oneway="true"/> <!-- Mbyá Guaraní -->
      <languageMatch desired="nhd" supported="gn" distance="10" oneway="true"/> <!-- Chiripá -->
      <!-- Encompassed by Inuktitut -->
      <languageMatch desired="ikt" supported="iu" distance="10" oneway="true"/> <!-- Inuinnaqtun -->
      <!-- Encompassed by Kalenjin -->
      <languageMatch desired="enb" supported="kln" distance="10" oneway="true"/> <!-- Markweeta -->
      <languageMatch desired="eyo" supported="kln" distance="10" oneway="true"/> <!-- Keiyo -->
      <languageMatch desired="niq" supported="kln" distance="10" oneway="true"/> <!-- Nandi -->
      <languageMatch desired="oki" supported="kln" distance="10" oneway="true"/> <!-- Okiek -->
      <languageMatch desired="pko" supported="kln" distance="10" oneway="true"/> <!-- Pökoot -->
      <languageMatch desired="sgc" supported="kln" distance="10" oneway="true"/> <!-- Kipsigis -->
      <languageMatch desired="tec" supported="kln" distance="10" oneway="true"/> <!-- Terik -->
      <languageMatch desired="tuy" supported="kln" distance="10" oneway="true"/> <!-- Tugen -->
      <!-- Encompassed by Konkani -->
      <languageMatch desired="gom" supported="kok" distance="10" oneway="true"/> <!-- Goan Konkani -->
      <!-- Encompassed by Kpelle -->
      <languageMatch desired="gkp" supported="kpe" distance="10" oneway="true"/> <!-- Guinea Kpelle -->
      <!-- Encompassed by Luyia -->
      <languageMatch desired="ida" supported="luy" distance="10" oneway="true"/> <!-- Idakho-Isukha-Tiriki -->
      <languageMatch desired="lkb" supported="luy" distance="10" oneway="true"/> <!-- Kabras -->
      <languageMatch desired="lko" supported="luy" distance="10" oneway="true"/> <!-- Khayo -->
      <languageMatch desired="lks" supported="luy" distance="10" oneway="true"/> <!-- Kisa -->
      <languageMatch desired="lri" supported="luy" distance="10" oneway="true"/> <!-- Marachi -->
      <languageMatch desired="lrm" supported="luy" distance="10" oneway="true"/> <!-- Marama -->
      <languageMatch desired="lsm" supported="luy" distance="10" oneway="true"/> <!-- Saamia -->
      <languageMatch desired="lto" supported="luy" distance="10" oneway="true"/> <!-- Tsotso -->
      <languageMatch desired="lts" supported="luy" distance="10" oneway="true"/> <!-- Tachoni -->
      <languageMatch desired="lwg" supported="luy" distance="10" oneway="true"/> <!-- Wanga -->
      <languageMatch desired="nle" supported="luy" distance="10" oneway="true"/> <!-- East Nyala -->
      <languageMatch desired="nyd" supported="luy" distance="10" oneway="true"/> <!-- Nyore -->
      <languageMatch desired="rag" supported="luy" distance="10" oneway="true"/> <!-- Logooli -->
      <!-- Encompassed by Latvian -->
      <languageMatch desired="ltg" supported="lv" distance="10" oneway="true"/> <!-- Latgalian -->
      <!-- Encompassed by Malagasy -->
      <languageMatch desired="bhr" supported="mg" distance="10" oneway="true"/> <!-- Bara Malagasy -->
      <languageMatch desired="bjq" supported="mg" distance="10" oneway="true"/> <!-- Southern Betsimisaraka Malagasy -->
      <languageMatch desired="bmm" supported="mg" distance="10" oneway="true"/> <!-- Northern Betsimisaraka Malagasy -->
      <languageMatch desired="bzc" supported="mg" distance="10" oneway="true"/> <!-- Southern Betsimisaraka Malagasy -->
      <languageMatch desired="msh" supported="mg" distance="10" oneway="true"/> <!-- Masikoro Malagasy -->
      <languageMatch desired="skg" supported="mg" distance="10" oneway="true"/> <!-- Sakalava Malagasy -->
      <languageMatch desired="tdx" supported="mg" distance="10" oneway="true"/> <!-- Tandroy-Mahafaly Malagasy -->
      <languageMatch desired="tkg" supported="mg" distance="10" oneway="true"/> <!-- Tesaka Malagasy -->
      <languageMatch desired="txy" supported="mg" distance="10" oneway="true"/> <!-- Tanosy Malagasy -->
      <languageMatch desired="xmv" supported="mg" distance="10" oneway="true"/> <!-- Antankarana Malagasy -->
      <languageMatch desired="xmw" supported="mg" distance="10" oneway="true"/> <!-- Tsimihety Malagasy -->
      <!-- Encompassed by Mongolian -->
      <languageMatch desired="mvf" supported="mn" distance="10" oneway="true"/> <!-- Peripheral Mongolian -->
      <!-- Encompassed by Malay -->
      <languageMatch desired="bjn" supported="ms" distance="10" oneway="true"/> <!-- Banjar -->
      <languageMatch desired="btj" supported="ms" distance="10" oneway="true"/> <!-- Bacanese Malay -->
      <languageMatch desired="bve" supported="ms" distance="10" oneway="true"/> <!-- Berau Malay -->
      <languageMatch desired="bvu" supported="ms" distance="10" oneway="true"/> <!-- Bukit Malay -->
      <languageMatch desired="coa" supported="ms" distance="10" oneway="true"/> <!-- Cocos Islands Malay -->
      <languageMatch desired="dup" supported="ms" distance="10" oneway="true"/> <!-- Duano -->
      <languageMatch desired="hji" supported="ms" distance="10" oneway="true"/> <!-- Haji -->
      <languageMatch desired="id" supported="ms" distance="10" oneway="true"/> <!-- Indonesian -->
      <languageMatch desired="jak" supported="ms" distance="10" oneway="true"/> <!-- Jakun -->
      <languageMatch desired="jax" supported="ms" distance="10" oneway="true"/> <!-- Jambi Malay -->
      <languageMatch desired="kvb" supported="ms" distance="10" oneway="true"/> <!-- Kubu -->
      <languageMatch desired="kvr" supported="ms" distance="10" oneway="true"/> <!-- Kerinci -->
      <languageMatch desired="kxd" supported="ms" distance="10" oneway="true"/> <!-- Brunei -->
      <languageMatch desired="lce" supported="ms" distance="10" oneway="true"/> <!-- Loncong -->
      <languageMatch desired="lcf" supported="ms" distance="10" oneway="true"/> <!-- Lubu -->
      <languageMatch desired="liw" supported="ms" distance="10" oneway="true"/> <!-- Col -->
      <languageMatch desired="max" supported="ms" distance="10" oneway="true"/> <!-- North Moluccan Malay -->
      <languageMatch desired="meo" supported="ms" distance="10" oneway="true"/> <!-- Kedah Malay -->
      <languageMatch desired="mfa" supported="ms" distance="10" oneway="true"/> <!-- Pattani Malay -->
      <languageMatch desired="mfb" supported="ms" distance="10" oneway="true"/> <!-- Bangka -->
      <languageMatch desired="min" supported="ms" distance="10" oneway="true"/> <!-- Minangkabau -->
      <languageMatch desired="mqg" supported="ms" distance="10" oneway="true"/> <!-- Kota Bangun Kutai Malay -->
      <languageMatch desired="msi" supported="ms" distance="10" oneway="true"/> <!-- Sabah Malay -->
      <languageMatch desired="mui" supported="ms" distance="10" oneway="true"/> <!-- Musi -->
      <languageMatch desired="orn" supported="ms" distance="10" oneway="true"/> <!-- Orang Kanaq -->
      <languageMatch desired="ors" supported="ms" distance="10" oneway="true"/> <!-- Orang Seletar -->
      <languageMatch desired="pel" supported="ms" distance="10" oneway="true"/> <!-- Pekal -->
      <languageMatch desired="pse" supported="ms" distance="10" oneway="true"/> <!-- Central Malay -->
      <languageMatch desired="tmw" supported="ms" distance="10" oneway="true"/> <!-- Temuan -->
      <languageMatch desired="urk" supported="ms" distance="10" oneway="true"/> <!-- Urak Lawoi' -->
      <languageMatch desired="vkk" supported="ms" distance="10" oneway="true"/> <!-- Kaur -->
      <languageMatch desired="vkt" supported="ms" distance="10" oneway="true"/> <!-- Tenggarong Kutai Malay -->
      <languageMatch desired="xmm" supported="ms" distance="10" oneway="true"/> <!-- Manado Malay -->
      <languageMatch desired="zlm" supported="ms" distance="10" oneway="true"/> <!-- Malay (individual language) -->
      <languageMatch desired="zmi" supported="ms" distance="10" oneway="true"/> <!-- Negeri Sembilan Malay -->
      <!-- Encompassed by Nepali -->
      <languageMatch desired="dty" supported="ne" distance="10" oneway="true"/> <!-- Dotyali -->
      <!-- Encompassed by Oromo -->
      <languageMatch desired="gax" supported="om" distance="10" oneway="true"/> <!-- Borana-Arsi-Guji Oromo -->
      <languageMatch desired="hae" supported="om" distance="10" oneway="true"/> <!-- Eastern Oromo -->
      <languageMatch desired="orc" supported="om" distance="10" oneway="true"/> <!-- Orma -->
      <!-- Encompassed by Odia -->
      <languageMatch desired="spv" supported="or" distance="10" oneway="true"/> <!-- Sambalpuri -->
      <!-- Encompassed by Pashto -->
      <languageMatch desired="pbt" supported="ps" distance="10" oneway="true"/> <!-- Southern Pashto -->
      <languageMatch desired="pst" supported="ps" distance="10" oneway="true"/> <!-- Central Pashto -->
      <!-- Encompassed by Quechua -->
      <languageMatch desired="qub" supported="qu" distance="10" oneway="true"/> <!-- Huallaga Huánuco Quechua -->
      <languageMatch desired="qud" supported="qu" distance="10" oneway="true"/> <!-- Calderón Highland Quichua -->
      <languageMatch desired="quf" supported="qu" distance="10" oneway="true"/> <!-- Lambayeque Quechua -->
      <languageMatch desired="qug" supported="qu" distance="10" oneway="true"/> <!-- Chimborazo Highland Quichua -->
      <languageMatch desired="quh" supported="qu" distance="10" oneway="true"/> <!-- South Bolivian Quechua -->
      <languageMatch desired="quk" supported="qu" distance="10" oneway="true"/> <!-- Chachapoyas Quechua -->
      <languageMatch desired="qul" supported="qu" distance="10" oneway="true"/> <!-- North Bolivian Quechua -->
      <languageMatch desired="qup" supported="qu" distance="10" oneway="true"/> <!-- Southern Pastaza Quechua -->
      <languageMatch desired="qur" supported="qu" distance="10" oneway="true"/> <!-- Yanahuanca Pasco Quechua -->
      <languageMatch desired="qus" supported="qu" distance="10" oneway="true"/> <!-- Santiago del Estero Quichua -->
      <languageMatch desired="quw" supported="qu" distance="10" oneway="true"/> <!-- Tena Lowland Quichua -->
      <languageMatch desired="qux" supported="qu" distance="10" oneway="true"/> <!-- Yauyos Quechua -->
      <languageMatch desired="quy" supported="qu" distance="10" oneway="true"/> <!-- Ayacucho Quechua -->
      <languageMatch desired="qva" supported="qu" distance="10" oneway="true"/> <!-- Ambo-Pasco Quechua -->
      <languageMatch desired="qvc" supported="qu" distance="10" oneway="true"/> <!-- Cajamarca Quechua -->
      <languageMatch desired="qve" supported="qu" distance="10" oneway="true"/> <!-- Eastern Apurímac Quechua -->
      <languageMatch desired="qvh" supported="qu" distance="10" oneway="true"/> <!-- Huamalíes-Dos de Mayo Huánuco Quechua -->
      <languageMatch desired="qvi" supported="qu" distance="10" oneway="true"/> <!-- Imbabura Highland Quichua -->
      <languageMatch desired="qvj" supported="qu" distance="10" oneway="true"/> <!-- Loja Highland Quichua -->
      <languageMatch desired="qvl" supported="qu" distance="10" oneway="true"/> <!-- Cajatambo North Lima Quechua -->
      <languageMatch desired="qvm" supported="qu" distance="10" oneway="true"/> <!-- Margos-Yarowilca-Lauricocha Quechua -->
      <languageMatch desired="qvn" supported="qu" distance="10" oneway="true"/> <!-- North Junín Quechua -->
      <languageMatch desired="qvo" supported="qu" distance="10" oneway="true"/> <!-- Napo Lowland Quechua -->
      <languageMatch desired="qvp" supported="qu" distance="10" oneway="true"/> <!-- Pacaraos Quechua -->
      <languageMatch desired="qvs" supported="qu" distance="10" oneway="true"/> <!-- San Martín Quechua -->
      <languageMatch desired="qvw" supported="qu" distance="10" oneway="true"/> <!-- Huaylla Wanca Quechua -->
      <languageMatch desired="qvz" supported="qu" distance="10" oneway="true"/> <!-- Northern Pastaza Quichua -->
      <languageMatch desired="qwa" supported="qu" distance="10" oneway="true"/> <!-- Corongo Ancash Quechua -->
      <languageMatch desired="qwc" supported="qu" distance="10" oneway="true"/> <!-- Classical Quechua -->
      <languageMatch desired="qwh" supported="qu" distance="10" oneway="true"/> <!-- Huaylas Ancash Quechua -->
      <languageMatch desired="qws" supported="qu" distance="10" oneway="true"/> <!-- Sihuas Ancash Quechua -->
      <languageMatch desired="qxa" supported="qu" distance="10" oneway="true"/> <!-- Chiquián Ancash Quechua -->
      <languageMatch desired="qxc" supported="qu" distance="10" oneway="true"/> <!-- Chincha Quechua -->
      <languageMatch desired="qxh" supported="qu" distance="10" oneway="true"/> <!-- Panao Huánuco Quechua -->
      <languageMatch desired="qxl" supported="qu" distance="10" oneway="true"/> <!-- Salasaca Highland Quichua -->
      <languageMatch desired="qxn" supported="qu" distance="10" oneway="true"/> <!-- Northern Conchucos Ancash Quechua -->
      <languageMatch desired="qxo" supported="qu" distance="10" oneway="true"/> <!-- Southern Conchucos Ancash Quechua -->
      <languageMatch desired="qxp" supported="qu" distance="10" oneway="true"/> <!-- Puno Quechua -->
      <languageMatch desired="qxr" supported="qu" distance="10" oneway="true"/> <!-- Cañar Highland Quichua -->
      <languageMatch desired="qxt" supported="qu" distance="10" oneway="true"/> <!-- Santa Ana de Tusi Pasco Quechua -->
      <languageMatch desired="qxu" supported="qu" distance="10" oneway="true"/> <!-- Arequipa-La Unión Quechua -->
      <languageMatch desired="qxw" supported="qu" distance="10" oneway="true"/> <!-- Jauja Wanca Quechua -->
      <!-- Encompassed by Sardinian -->
      <languageMatch desired="sdc" supported="sc" distance="10" oneway="true"/> <!-- Sassarese Sardinian -->
      <languageMatch desired="sdn" supported="sc" distance="10" oneway="true"/> <!-- Gallurese Sardinian -->
      <languageMatch desired="sro" supported="sc" distance="10" oneway="true"/> <!-- Campidanese Sardinian -->
      <!-- Encompassed by Albanian -->
      <languageMatch desired="aae" supported="sq" distance="10" oneway="true"/> <!-- Arbëreshë Albanian -->
      <languageMatch desired="aat" supported="sq" distance="10" oneway="true"/> <!-- Arvanitika Albanian -->
      <languageMatch desired="aln" supported="sq" distance="10" oneway="true"/> <!-- Gheg Albanian -->
      <!-- Encompassed by Syriac -->
      <languageMatch desired="aii" supported="syr" distance="10" oneway="true"/> <!-- Assyrian Neo-Aramaic -->
      <!-- Encompassed by Uzbek -->
      <languageMatch desired="uzs" supported="uz" distance="10" oneway="true"/> <!-- Southern Uzbek -->
      <!-- Encompassed by Yiddish -->
      <languageMatch desired="yih" supported="yi" distance="10" oneway="true"/> <!-- Western Yiddish -->
      <!-- Encompassed by Chinese, Mandarin -->
      <languageMatch desired="cdo" supported="zh" distance="10" oneway="true"/> <!-- Min Dong Chinese -->
      <languageMatch desired="cjy" supported="zh" distance="10" oneway="true"/> <!-- Jinyu Chinese -->
      <languageMatch desired="cpx" supported="zh" distance="10" oneway="true"/> <!-- Pu-Xian Chinese -->
      <languageMatch desired="czh" supported="zh" distance="10" oneway="true"/> <!-- Huizhou Chinese -->
      <languageMatch desired="czo" supported="zh" distance="10" oneway="true"/> <!-- Min Zhong Chinese -->
      <languageMatch desired="gan" supported="zh" distance="10" oneway="true"/> <!-- Gan Chinese -->
      <languageMatch desired="hak" supported="zh" distance="10" oneway="true"/> <!-- Hakka Chinese -->
      <languageMatch desired="hsn" supported="zh" distance="10" oneway="true"/> <!-- Xiang Chinese -->
      <languageMatch desired="lzh" supported="zh" distance="10" oneway="true"/> <!-- Literary Chinese -->
      <languageMatch desired="mnp" supported="zh" distance="10" oneway="true"/> <!-- Min Bei Chinese -->
      <languageMatch desired="nan" supported="zh" distance="10" oneway="true"/> <!-- Min Nan Chinese -->
      <languageMatch desired="wuu" supported="zh" distance="10" oneway="true"/> <!-- Wu Chinese -->
      <languageMatch desired="yue" supported="zh" distance="10" oneway="true"/> <!-- Chinese, Cantonese -->
      <!-- END generated by GenerateLanguageMatches.java -->
      <languageMatch desired="*" supported="*" distance="80"/> <!-- * ⇒ * -->
      <languageMatch desired="az_Latn" supported="ru_Cyrl" distance="10" oneway="true"/> <!-- az; Latn ⇒ ru; Cyrl -->
      <languageMatch desired="bn_Beng" supported="en_Latn" distance="10" oneway="true"/> <!-- bn; Beng ⇒ en; Latn -->
      <languageMatch desired="hy_Armn" supported="ru_Cyrl" distance="10" oneway="true"/> <!-- hy; Armn ⇒ ru; Cyrl -->
      <languageMatch desired="ka_Geor" supported="en_Latn" distance="10" oneway="true"/> <!-- ka; Geor ⇒ en; Latn -->
      <languageMatch desired="km_Khmr" supported="en_Latn" distance="10" oneway="true"/> <!-- km; Khmr ⇒ en; Latn -->
      <languageMatch desired="kn_Knda" supported="en_Latn" distance="10" oneway="true"/> <!-- kn; Knda ⇒ en; Latn -->
      <languageMatch desired="lo_Laoo" supported="en_Latn" distance="10" oneway="true"/> <!-- lo; Laoo ⇒ en; Latn -->
      <languageMatch desired="ml_Mlym" supported="en_Latn" distance="10" oneway="true"/> <!-- ml; Mlym ⇒ en; Latn -->
      <languageMatch desired="my_Mymr" supported="en_Latn" distance="10" oneway="true"/> <!-- my; Mymr ⇒ en; Latn -->
      <languageMatch desired="ne_Deva" supported="en_Latn" distance="10" oneway="true"/> <!-- ne; Deva ⇒ en; Latn -->
      <languageMatch desired="or_Orya" supported="en_Latn" distance="10" oneway="true"/> <!-- or; Orya ⇒ en; Latn -->
      <languageMatch desired="pa_Guru" supported="en_Latn" distance="10" oneway="true"/> <!-- pa; Guru ⇒ en; Latn -->
      <languageMatch desired="ps_Arab" supported="en_Latn" distance="10" oneway="true"/> <!-- ps; Arab ⇒ en; Latn -->
      <languageMatch desired="sd_Arab" supported="en_Latn" distance="10" oneway="true"/> <!-- sd; Arab ⇒ en; Latn -->
      <languageMatch desired="si_Sinh" supported="en_Latn" distance="10" oneway="true"/> <!-- si; Sinh ⇒ en; Latn -->
      <languageMatch desired="ta_Taml" supported="en_Latn" distance="10" oneway="true"/> <!-- ta; Taml ⇒ en; Latn -->
      <languageMatch desired="te_Telu" supported="en_Latn" distance="10" oneway="true"/> <!-- te; Telu ⇒ en; Latn -->
      <languageMatch desired="ti_Ethi" supported="en_Latn" distance="10" oneway="true"/> <!-- ti; Ethi ⇒ en; Latn -->
      <languageMatch desired="tk_Latn" supported="ru_Cyrl" distance="10" oneway="true"/> <!-- tk; Latn ⇒ ru; Cyrl -->
      <languageMatch desired="ur_Arab" supported="en_Latn" distance="10" oneway="true"/> <!-- ur; Arab ⇒ en; Latn -->
      <languageMatch desired="uz_Latn" supported="ru_Cyrl" distance="10" oneway="true"/> <!-- uz; Latn ⇒ ru; Cyrl -->
      <languageMatch desired="yi_Hebr" supported="en_Latn" distance="10" oneway="true"/> <!-- yi; Hebr ⇒ en; Latn -->
      <languageMatch desired="sr_Latn" supported="sr_Cyrl" distance="5"/> <!-- sr; Latn ⇒ sr; Cyrl -->
      <languageMatch desired="zh_Hans" supported="zh_Hant" distance="15" oneway="true"/> <!-- zh; Hans ⇒ zh; Hant -->
      <languageMatch desired="zh_Hant" supported="zh_Hans" distance="19" oneway="true"/> <!-- zh; Hant ⇒ zh; Hans -->
      <!-- zh_Hani: Slightly bigger distance than zh_Hant->zh_Hans -->
      <languageMatch desired="zh_Hani" supported="zh_Hans" distance="20" oneway="true"/>
      <languageMatch desired="zh_Hani" supported="zh_Hant" distance="20" oneway="true"/>
      <!-- Latin transliterations of some languages, initially from CLDR-13577 -->
      <languageMatch desired="ar_Latn" supported="ar_Arab" distance="20" oneway="true"/>
      <languageMatch desired="bn_Latn" supported="bn_Beng" distance="20" oneway="true"/>
      <languageMatch desired="gu_Latn" supported="gu_Gujr" distance="20" oneway="true"/>
      <languageMatch desired="hi_Latn" supported="hi_Deva" distance="20" oneway="true"/>
      <languageMatch desired="kn_Latn" supported="kn_Knda" distance="20" oneway="true"/>
      <languageMatch desired="ml_Latn" supported="ml_Mlym" distance="20" oneway="true"/>
      <languageMatch desired="mr_Latn" supported="mr_Deva" distance="20" oneway="true"/>
      <languageMatch desired="ta_Latn" supported="ta_Taml" distance="20" oneway="true"/>
      <languageMatch desired="te_Latn" supported="te_Telu" distance="20" oneway="true"/>
      <languageMatch desired="zh_Latn" supported="zh_Hans" distance="20" oneway="true"/> <!-- Pinyin -->
      <!-- start fallbacks for group script codes, initially from CLDR-13526
           Look for plus signs on https://www.unicode.org/iso15924/iso15924-codes.html -->
      <languageMatch desired="ja_Latn" supported="ja_Jpan" distance="5" oneway="true"/>
      <languageMatch desired="ja_Hani" supported="ja_Jpan" distance="5" oneway="true"/>
      <languageMatch desired="ja_Hira" supported="ja_Jpan" distance="5" oneway="true"/>
      <languageMatch desired="ja_Kana" supported="ja_Jpan" distance="5" oneway="true"/>
      <languageMatch desired="ja_Hrkt" supported="ja_Jpan" distance="5" oneway="true"/>
      <languageMatch desired="ja_Hira" supported="ja_Hrkt" distance="5" oneway="true"/>
      <languageMatch desired="ja_Kana" supported="ja_Hrkt" distance="5" oneway="true"/>
      <languageMatch desired="ko_Hani" supported="ko_Kore" distance="5" oneway="true"/>
      <languageMatch desired="ko_Hang" supported="ko_Kore" distance="5" oneway="true"/>
      <languageMatch desired="ko_Jamo" supported="ko_Kore" distance="5" oneway="true"/>
      <languageMatch desired="ko_Jamo" supported="ko_Hang" distance="5" oneway="true"/>
      <!-- No special mappings for zh Bopo/Hanb
           because Bopomofo is used only in TW, and unsure how widely.
           No special mappings for styled scripts like Latf or Aran
           because those would apply to many languages;
           if desired, those would be better handled as matcher-specific script aliases. -->
      <!-- end fallbacks for group script codes -->
      <!-- default script mismatch distance -->
      <languageMatch desired="*_*" supported="*_*" distance="50"/> <!-- *; * ⇒ *; * -->

      <languageMatch desired="ar_*_$maghreb" supported="ar_*_$maghreb" distance="4"/> <!-- ar; *; $maghreb ⇒ ar; *; $maghreb -->
      <languageMatch desired="ar_*_$!maghreb" supported="ar_*_$!maghreb" distance="4"/> <!-- ar; *; $!maghreb ⇒ ar; *; $!maghreb -->
      <languageMatch desired="ar_*_*" supported="ar_*_*" distance="5"/> <!-- ar; *; * ⇒ ar; *; * -->
      <languageMatch desired="en_*_$enUS" supported="en_*_$enUS" distance="4"/> <!-- en; *; $enUS ⇒ en; *; $enUS -->
      <languageMatch desired="en_*_$!enUS" supported="en_*_GB" distance="3"/> <!-- Make en_GB preferred... -->
      <languageMatch desired="en_*_$!enUS" supported="en_*_$!enUS" distance="4"/> <!-- en; *; $!enUS ⇒ en; *; $!enUS -->
      <languageMatch desired="en_*_*" supported="en_*_*" distance="5"/> <!-- en; *; * ⇒ en; *; * -->
      <languageMatch desired="es_*_$americas" supported="es_*_$americas" distance="4"/> <!-- es; *; $americas ⇒ es; *; $americas -->
      <languageMatch desired="es_*_$!americas" supported="es_*_$!americas" distance="4"/> <!-- es; *; $!americas ⇒ es; *; $!americas -->
      <languageMatch desired="es_*_*" supported="es_*_*" distance="5"/> <!-- es; *; * ⇒ es; *; * -->
      <languageMatch desired="pt_*_$americas" supported="pt_*_$americas" distance="4"/> <!-- pt; *; $americas ⇒ pt; *; $americas -->
      <languageMatch desired="pt_*_$!americas" supported="pt_*_$!americas" distance="4"/> <!-- pt; *; $!americas ⇒ pt; *; $!americas -->
      <languageMatch desired="pt_*_*" supported="pt_*_*" distance="5"/> <!-- pt; *; * ⇒ pt; *; * -->
      <languageMatch desired="zh_Hant_$cnsar" supported="zh_Hant_$cnsar" distance="4"/> <!-- zh; Hant; $cnsar ⇒ zh; Hant; $cnsar -->
      <languageMatch desired="zh_Hant_$!cnsar" supported="zh_Hant_$!cnsar" distance="4"/> <!-- zh; Hant; $!cnsar ⇒ zh; Hant; $!cnsar -->
      <languageMatch desired="zh_Hant_*" supported="zh_Hant_*" distance="5"/> <!-- zh; Hant; * ⇒ zh; Hant; * -->
      <languageMatch desired="*_*_*" supported="*_*_*" distance="4"/> <!-- *; *; * ⇒ *; *; * -->
    </languageMatches>
  </languageMatching>
</supplementalData>
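Note: the table above is consumed by a distance-based matcher; a smaller distance is a closer match, oneway="true" suppresses the reverse direction, and the "*" rules supply fallbacks. A minimal Python sketch of that idea, using a tiny hand-copied subset of the rules (RULES and FALLBACK below are illustrative simplifications, not the real CLDR matcher, which also handles scripts, regions, and match variables):

# illustrative (desired, supported) -> distance pairs copied from the table above
RULES = {
    ('no', 'nb'): 1,
    ('da', 'nb'): 8,
    ('gsw', 'de'): 4,  # oneway: gsw may fall back to de, not the reverse
}
FALLBACK = 80  # the <languageMatch desired="*" supported="*" distance="80"/> rule

def best_supported(desired, supported_list):
    # pick the supported language with the smallest match distance
    return min(supported_list,
               key=lambda s: 0 if s == desired else RULES.get((desired, s), FALLBACK))

print(best_supported('da', ['nb', 'fr']))  # -> 'nb' (distance 8 beats the 80 fallback)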
|
3
lib/language_data/data/override_language_names.csv
Normal file
|
@ -0,0 +1,3 @@
|
|||
zsm,zsm,bahasa Malaysia
id,id,bahasa Indonesia
ms,ms,bahasa Malaysia
5645
lib/language_data/data/supplementalData.xml
Normal file
7845
lib/language_data/data/wiktionary/codes-en.csv
Normal file
89
lib/language_data/language_lists.py
Normal file
|
@ -0,0 +1,89 @@
|
|||
# This is the list of language codes with the 'modern' level of support in CLDR
# (compared to 'full', which contains many more languages). We use this as the
# list of languages that we store specific name-to-code mappings for.

CLDR_LANGUAGES = {
    "af",
    "am",
    "ar",
    "as",
    "az",
    "be",
    "bg",
    "bn",
    "bs",
    "ca",
    "cs",
    "cy",
    "da",
    "de",
    "el",
    "en",
    "es",
    "et",
    "eu",
    "fa",
    "fi",
    "fil",
    "fr",
    "ga",
    "gl",
    "gu",
    "he",
    "hi",
    "hr",
    "hu",
    "hy",
    "id",
    "is",
    "it",
    "ja",
    "jv",
    "ka",
    "kk",
    "km",
    "kn",
    "ko",
    "ky",
    "lo",
    "lt",
    "lv",
    "mk",
    "ml",
    "mn",
    "mr",
    "ms",
    "my",
    "nb",
    "ne",
    "nl",
    "or",
    "pa",
    "pl",
    "pt",
    "ro",
    "ru",
    "sd",
    "si",
    "sk",
    "sl",
    "so",
    "sq",
    "sr",
    "sv",
    "sw",
    "ta",
    "te",
    "th",
    "ti",
    "tk",
    "tr",
    "uk",
    "und",
    "ur",
    "uz",
    "vi",
    "yue",
    "zh",
    "zu",
}
|
9210
lib/language_data/name_data.py
Normal file
112
lib/language_data/names.py
Normal file
|
@ -0,0 +1,112 @@
|
|||
# import marisa_trie
import warnings

from language_data.util import data_filename


TRIES = {}

# This is something we could hypothetically discover from XML files, but
# we end up learning that most languages separate things with commas, with
# a few exceptions. We'll just put those exceptions here.
DISPLAY_SEPARATORS = {
    'am': '፣',
    'ar': '، ',
    'brx': ',',
    'fa': '، ',
    'ja': '、',
    'my': '၊ ',
    'ug': '، ',
    'und': ', ',
    'ur': '، ',
    'yue': ',',
    'zh': ',',
}


def normalize_name(name):
    """
    When looking up a language-code component by name, we would rather ignore
    distinctions of case and certain punctuation. "Chinese (Traditional)"
    should be matched by "Chinese Traditional" and "chinese traditional".
    """
    name = name.casefold()
    name = name.replace("’", "'")
    name = name.replace("-", " ")
    name = name.replace("(", "")
    name = name.replace(")", "")
    name = name.replace(",", "")
    return name.strip()


# def load_trie(filename):
#     """
#     Load a BytesTrie from the marisa_trie on-disk format.
#     """
#     trie = marisa_trie.BytesTrie()
#     # marisa_trie raises warnings that make no sense. Ignore them.
#     with warnings.catch_warnings():
#         warnings.simplefilter("ignore")
#         trie.load(filename)
#     return trie


def get_trie_value(trie, key):
    """
    Get the value that a BytesTrie stores for a particular key, decoded
    as Unicode. Raises a KeyError if there is no value for that key.
    """
    return trie[key][0].decode("utf-8")


def name_to_code(category, name, language: str = "und"):
    """
    Get a language, script, or territory by its name in some language.

    The language here must be a string representing a language subtag only.
    The `Language.find` method can handle other representations of a language
    and normalize them to this form.

    The default language, "und", will allow matching names in any language,
    so you can get the code 'fr' by looking up "French", "Français", or
    "francés".

    A small amount of fuzzy matching is supported: if the name can be
    shortened or lengthened to match a single language name, you get that
    language. This allows, for example, "Hakka Chinese" to match "Hakka".

    Occasionally, names are ambiguous in a way that can be resolved by
    specifying what name the language is supposed to be in. For example,
    there is a language named 'Malayo' in English, but it's different from
    the language named 'Malayo' in Spanish (which is Malay). Specifying the
    language will look up the name in a trie that is only in that language.
    """
    assert "/" not in language, "Language codes cannot contain slashes"
    assert "-" not in language, "This code should be reduced to a language subtag only"
    trie_name = "{}/name_to_{}".format(language, category)
    if trie_name not in TRIES:
        TRIES[trie_name] = load_trie(data_filename("trie/{}.marisa".format(trie_name)))

    trie = TRIES[trie_name]
    lookup = normalize_name(name)
    if lookup in trie:
        return get_trie_value(trie, lookup)
    else:
        # Is this a language name plus extra verbiage? Maybe it has "...isch",
        # "... language", or "... Chinese" attached to it, for example. Look
        # for a matching prefix of the desired name with at least 4 characters.
        prefixes = trie.prefixes(lookup)
        if prefixes and len(prefixes[-1]) >= 4:
            return get_trie_value(trie, prefixes[-1])
        else:
            return None


def code_to_names(code):
    """
    Given the code for a language, script, or territory, get a dictionary of its
    names in various languages.
    """
    # late import to save memory when possible
    import language_data.name_data
    return language_data.name_data.CODE_TO_NAMES.get(code, {})
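A short usage sketch of this module as vendored (note that the marisa_trie import and load_trie are commented out in this copy, so the trie path of name_to_code is unavailable here; normalize_name and code_to_names work standalone):

from language_data.names import normalize_name, code_to_names

print(normalize_name('Chinese (Traditional)'))  # -> 'chinese traditional'
# code_to_names returns a dict keyed by language code, e.g. {'en': 'French', ...}
print(code_to_names('fr').get('en'))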
|
6140
lib/language_data/population_data.py
Normal file
58
lib/language_data/registry_parser.py
Normal file
|
@ -0,0 +1,58 @@
|
|||
from language_data.util import data_filename

LIST_KEYS = {'Description', 'Prefix'}


def parse_file(file):
    """
    Take an open file containing the IANA subtag registry, and yield a
    dictionary of information for each subtag it describes.
    """
    lines = []
    for line in file:
        line = line.rstrip('\n')
        if line == '%%':
            # This is a separator between items. Parse the data we've
            # collected and yield the result.
            yield from parse_item(lines)
            lines.clear()
        elif line.startswith(' '):
            # This is a continuation line. Concatenate it to the previous
            # line, including one of the spaces.
            lines[-1] += line[1:]
        else:
            lines.append(line)
    yield from parse_item(lines)


def parse_item(lines):
    """
    Given the lines that form a subtag entry (after joining wrapped lines
    back together), parse the data they contain.

    Returns a generator that yields once if there was any data there
    (and an empty generator if this was just the header).
    """
    info = {}
    for line in lines:
        key, value = line.split(': ', 1)
        if key in LIST_KEYS:
            info.setdefault(key, []).append(value)
        else:
            assert key not in info
            info[key] = value

    if 'Subtag' in info or 'Tag' in info:
        yield info


def parse_registry():
    """
    Yield a sequence of dictionaries, containing the info in the included
    IANA subtag registry file.
    """
    with open(data_filename('language-subtag-registry.txt'),
              encoding='utf-8') as data_file:
        # 'yield from' instead of returning, so that we only close the file
        # when finished.
        yield from parse_file(data_file)
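For illustration, parse_file accepts any file-like object in registry format; the header block (no Subtag/Tag key) yields nothing, and Description values accumulate into a list. The two-record snippet below is invented:

import io
from language_data.registry_parser import parse_file

sample = io.StringIO(
    'File-Date: 2023-01-01\n'
    '%%\n'
    'Type: language\n'
    'Subtag: aa\n'
    'Description: Afar\n'
)
for record in parse_file(sample):
    print(record)  # {'Type': 'language', 'Subtag': 'aa', 'Description': ['Afar']}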
|
15
lib/language_data/util.py
Normal file
|
@ -0,0 +1,15 @@
|
|||
"""
Used for locating a file in the data directory.
"""

from pkg_resources import resource_filename
DATA_ROOT = resource_filename('language_data', 'data')
import os


def data_filename(filename):
    """
    Given a relative filename, get the full path to that file in the data
    directory.
    """
    return os.path.join(DATA_ROOT, filename)
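A one-line usage sketch (the resolved path shown is indicative):

data_filename('languageInfo.xml')  # -> '<site-packages>/language_data/data/languageInfo.xml'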
|
|
@ -25,6 +25,7 @@ import unicodedata
|
|||
from exceptions_helper import ex, ConnectionSkipException
from json_helper import json_loads
from cachecontrol import CacheControl, caches
from lib.dateutil.parser import parser
# from lib.tmdbsimple.configuration import Configuration
# from lib.tmdbsimple.genres import Genres
from cfscrape import CloudflareScraper
|
@ -41,6 +42,7 @@ from six import integer_types, iteritems, iterkeys, itervalues, moves, PY2, stri
|
|||
import zipfile
# py7z hardwired removed, see comment below
py7zr = None
tz_p = parser()

# noinspection PyUnreachableCode
if False:
|
@ -60,7 +62,7 @@ if False:
|
|||
    from sickgear import db, notifiers as NOTIFIERS
    # noinspection PyUnresolvedReferences
    from typing import Any, AnyStr, Dict, Generator, NoReturn, integer_types, Iterable, Iterator, List, Optional, \
        Tuple, Union
        Tuple, Type, Union

html_convert_fractions = {0: '', 25: '¼', 50: '½', 75: '¾', 100: 1}
|
@ -634,6 +636,21 @@ def try_int(s, s_default=0):
|
|||
        return s_default


def try_date(s, s_default=None):
    # type: (AnyStr, Any) -> Optional[AnyStr]
    """
    Convert a string to a standard UTC date string ('YYYY-MM-DD')
    :param s: string to parse
    :param s_default: value to return if parsing fails
    :return: date string or s_default
    """
    try:
        parse = tz_p.parse(clean_data(s))
        return '%04d-%02d-%02d' % (parse.year, parse.month, parse.day)
    except (BaseException, Exception):
        return s_default


def _maybe_request_url(e, def_url=''):
    return hasattr(e, 'request') and hasattr(e.request, 'url') and ' ' + e.request.url or def_url
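Expected behaviour of try_date, sketched with illustrative values (not tests from the repo):

try_date('21 Apr 2023')                # -> '2023-04-21'
try_date('2023-04-21T12:34:56+02:00')  # -> '2023-04-21'
try_date('not a date', s_default='')   # -> ''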
|
@ -644,6 +661,7 @@ def clean_data(data):
|
|||

    Issues corrected:
    - Replaces &amp; with &
    - Replace multiple spaces with one space
    - Trailing whitespace
    - Decode html entities
    :param data: data
|
@ -659,7 +677,7 @@ def clean_data(data):
|
|||
    if isinstance(data, dict):
        return {k: clean_data(v) for k, v in iteritems(data)}
    if isinstance(data, string_types):
        return unicodedata.normalize('NFKD', html_unescape(data).strip().replace('&amp;', '&'))
        return unicodedata.normalize('NFKD', re.sub(r' {2,}', ' ', html_unescape(data).strip().replace('&amp;', '&')))
    return data
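The replaced line means runs of internal spaces are now collapsed for strings; a sketch of the effect (invented input):

clean_data('A  Show   &amp; More ')  # before: 'A  Show   & More'; after: 'A Show & More'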
|
@ -1740,3 +1758,16 @@ def is_virtualenv():
|
|||
    """Get base/real prefix, or `sys.prefix` if there is none."""
    get_base_prefix_compat = getattr(sys, 'base_prefix', None) or getattr(sys, 'real_prefix', None) or sys.prefix
    return get_base_prefix_compat != sys.prefix


def enforce_type(value, allowed_types, default):
    # type: (Any, Union[Type, Tuple[Type]], Any) -> Any
    """
    Enforce that value is of the given type(s)
    :param value: value to check
    :param allowed_types: type or tuple of types allowed
    :param default: value to return if value is another type
    """
    if not isinstance(value, allowed_types):
        return default
    return value
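Sketched usage (illustrative):

enforce_type('5', int, 0)          # -> 0, a str is not an int so the default is returned
enforce_type(5, (int, float), 0)   # -> 5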
|
|
|
@ -1,13 +1,17 @@
|
|||
import copy
import datetime
import diskcache
import itertools
import logging
import threading
import shutil
import time
from collections import deque
from exceptions_helper import ex

from six import integer_types, iteritems, iterkeys, string_types, text_type
from typing import Callable


from lib.tvinfo_base.exceptions import *
from sg_helpers import calc_age, make_path
|
@ -42,6 +46,13 @@ TVINFO_TWITTER = 250000
|
|||
TVINFO_FACEBOOK = 250001
TVINFO_INSTAGRAM = 250002
TVINFO_WIKIPEDIA = 250003
TVINFO_REDDIT = 250004
TVINFO_YOUTUBE = 250005
TVINFO_WIKIDATA = 250006
TVINFO_TIKTOK = 250007
TVINFO_LINKEDIN = 250008
TVINFO_OFFICIALSITE = 250009
TVINFO_FANSITE = 250010

tv_src_names = {
    TVINFO_TVDB: 'tvdb',
|
@ -60,13 +71,30 @@ tv_src_names = {
|
|||
    TVINFO_TWITTER: 'twitter',
    TVINFO_FACEBOOK: 'facebook',
    TVINFO_INSTAGRAM: 'instagram',
    TVINFO_WIKIPEDIA: 'wikipedia'
    TVINFO_WIKIPEDIA: 'wikipedia',
    TVINFO_REDDIT: 'reddit',
    TVINFO_YOUTUBE: 'youtube',
    TVINFO_WIKIDATA: 'wikidata',
    TVINFO_TIKTOK: 'tiktok',
    TVINFO_LINKEDIN: 'linkedin',
    TVINFO_OFFICIALSITE: 'officialsite',
    TVINFO_FANSITE: 'fansite'

}

TVINFO_MID_SEASON_FINALE = 1
TVINFO_SEASON_FINALE = 2
TVINFO_SERIES_FINALE = 3

final_types = {
    TVINFO_MID_SEASON_FINALE: 'mid-season',
    TVINFO_SEASON_FINALE: 'season',
    TVINFO_SERIES_FINALE: 'series'
}

log = logging.getLogger('TVInfo')
log.addHandler(logging.NullHandler())
TVInfoShowContainer = {}  # type: Dict[str, ShowContainer]
TVInfoShowContainer = {}  # type: Union[ShowContainer, Dict]


class ShowContainer(dict):
|
@ -87,7 +115,7 @@ class ShowContainer(dict):
|
|||

    def cleanup_old(self):
        """
        remove entries that are older then max_age
        remove entries that are older than max_age
        """
        acquired_lock = self.lock.acquire(False)
        if acquired_lock:
|
@ -137,46 +165,120 @@ class TVInfoIDs(object):
|
|||
return {TVINFO_TVDB: self.tvdb, TVINFO_TMDB: self.tmdb, TVINFO_TVMAZE: self.tvmaze,
|
||||
TVINFO_IMDB: self.imdb, TVINFO_TRAKT: self.trakt, TVINFO_TVRAGE: self.rage}.get(key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self.__dict__[{
|
||||
TVINFO_TVDB: 'tvdb', TVINFO_TMDB: 'tmdb', TVINFO_TVMAZE: 'tvmaze',
|
||||
TVINFO_IMDB: 'imdb', TVINFO_TRAKT: 'trakt', TVINFO_TVRAGE: 'rage'
|
||||
}[key]] = value
|
||||
|
||||
def get(self, key):
|
||||
return self.__getitem__(key)
|
||||
|
||||
def keys(self):
|
||||
for k, v in iter(((TVINFO_TVDB, self.tvdb), (TVINFO_TMDB, self.tmdb), (TVINFO_TVMAZE, self.tvmaze),
|
||||
(TVINFO_IMDB, self.imdb), (TVINFO_TRAKT, self.trakt), (TVINFO_TVRAGE, self.rage))):
|
||||
if None is not v:
|
||||
yield k
|
||||
|
||||
def __iter__(self):
|
||||
for s, v in [(TVINFO_TVDB, self.tvdb), (TVINFO_TMDB, self.tmdb), (TVINFO_TVMAZE, self.tvmaze),
|
||||
(TVINFO_IMDB, self.imdb), (TVINFO_TRAKT, self.trakt), (TVINFO_TVRAGE, self.rage)]:
|
||||
for s, v in iter(((TVINFO_TVDB, self.tvdb), (TVINFO_TMDB, self.tmdb), (TVINFO_TVMAZE, self.tvmaze),
|
||||
(TVINFO_IMDB, self.imdb), (TVINFO_TRAKT, self.trakt), (TVINFO_TVRAGE, self.rage))):
|
||||
if None is not v:
|
||||
yield s, v
|
||||
|
||||
def __len__(self):
|
||||
counter = itertools.count()
|
||||
deque(zip(self.__iter__(), counter), maxlen=0) # (consume at C speed)
|
||||
return next(counter)
|
||||
|
||||
def __str__(self):
|
||||
return ', '.join('%s: %s' % (tv_src_names.get(k, k), v) for k, v in self.__iter__())
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.__dict__ == other.__dict__
|
||||
|
||||
__repr__ = __str__
|
||||
iteritems = __iter__
|
||||
items = __iter__
|
||||
iterkeys = keys
|
||||
|
||||
|
||||
class TVInfoSocialIDs(object):
    def __init__(self, twitter=None, instagram=None, facebook=None, wikipedia=None, ids=None):
        # type: (str_int, str_int, str_int, str_int, Dict[int, str_int]) -> None
    def __init__(
            self,
            twitter=None,  # type: str_int
            instagram=None,  # type: str_int
            facebook=None,  # type: str_int
            wikipedia=None,  # type: str_int
            ids=None,  # type: Dict[int, str_int]
            reddit=None,  # type: str_int
            youtube=None,  # type: AnyStr
            wikidata=None,  # type: AnyStr
            tiktok=None,  # type: AnyStr
            linkedin=None,  # type: AnyStr
            fansite=None  # type: AnyStr
    ):
        ids = ids or {}
        self.twitter = twitter or ids.get(TVINFO_TWITTER)
        self.instagram = instagram or ids.get(TVINFO_INSTAGRAM)
        self.facebook = facebook or ids.get(TVINFO_FACEBOOK)
        self.wikipedia = wikipedia or ids.get(TVINFO_WIKIPEDIA)
        self.reddit = reddit or ids.get(TVINFO_REDDIT)
        self.youtube = youtube or ids.get(TVINFO_YOUTUBE)
        self.wikidata = wikidata or ids.get(TVINFO_WIKIDATA)
        self.tiktok = tiktok or ids.get(TVINFO_TIKTOK)
        self.linkedin = linkedin or ids.get(TVINFO_LINKEDIN)
        self.fansite = fansite or ids.get(TVINFO_FANSITE)

    def __getitem__(self, key):
        return {TVINFO_TWITTER: self.twitter, TVINFO_INSTAGRAM: self.instagram, TVINFO_FACEBOOK: self.facebook,
                TVINFO_WIKIPEDIA: self.wikipedia}.get(key)
                TVINFO_WIKIDATA: self.wikidata, TVINFO_WIKIPEDIA: self.wikipedia, TVINFO_REDDIT: self.reddit,
                TVINFO_TIKTOK: self.tiktok, TVINFO_LINKEDIN: self.linkedin, TVINFO_FANSITE: self.fansite,
                TVINFO_YOUTUBE: self.youtube}.get(key)

    def __setitem__(self, key, value):
        self.__dict__[{
            TVINFO_TWITTER: 'twitter', TVINFO_INSTAGRAM: 'instagram', TVINFO_FACEBOOK: 'facebook',
            TVINFO_WIKIPEDIA: 'wikipedia', TVINFO_REDDIT: 'reddit', TVINFO_YOUTUBE: 'youtube',
            TVINFO_WIKIDATA: 'wikidata', TVINFO_TIKTOK: 'tiktok', TVINFO_LINKEDIN: 'linkedin', TVINFO_FANSITE: 'fansite'
        }[key]] = value

    def get(self, key):
        return self.__getitem__(key)

    def keys(self):
        for k, v in iter(((TVINFO_TWITTER, self.twitter), (TVINFO_INSTAGRAM, self.instagram),
                          (TVINFO_FACEBOOK, self.facebook), (TVINFO_TIKTOK, self.tiktok),
                          (TVINFO_WIKIPEDIA, self.wikipedia), (TVINFO_WIKIDATA, self.wikidata),
                          (TVINFO_REDDIT, self.reddit), (TVINFO_YOUTUBE, self.youtube),
                          (TVINFO_LINKEDIN, self.linkedin), (TVINFO_FANSITE, self.fansite))):
            if None is not v:
                yield k

    def __iter__(self):
        for s, v in [(TVINFO_TWITTER, self.twitter), (TVINFO_INSTAGRAM, self.instagram),
                     (TVINFO_FACEBOOK, self.facebook), (TVINFO_WIKIPEDIA, self.wikipedia)]:
        for s, v in iter(((TVINFO_TWITTER, self.twitter), (TVINFO_INSTAGRAM, self.instagram),
                          (TVINFO_FACEBOOK, self.facebook), (TVINFO_TIKTOK, self.tiktok),
                          (TVINFO_WIKIPEDIA, self.wikipedia), (TVINFO_WIKIDATA, self.wikidata),
                          (TVINFO_REDDIT, self.reddit), (TVINFO_YOUTUBE, self.youtube),
                          (TVINFO_LINKEDIN, self.linkedin), (TVINFO_FANSITE, self.fansite))):
            if None is not v:
                yield s, v

    def __len__(self):
        counter = itertools.count()
        deque(zip(self.__iter__(), counter), maxlen=0)  # (consume at C speed)
        return next(counter)

    def __str__(self):
        return ', '.join('%s: %s' % (tv_src_names.get(k, k), v) for k, v in self.__iter__())

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    __repr__ = __str__
    iteritems = __iter__
    items = __iter__
    iterkeys = keys

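A short usage sketch of the widened socials container (handles are made up; the TVINFO_* constants are assumed importable from the same module):

    ids = TVInfoSocialIDs(twitter='example_handle', ids={TVINFO_YOUTUBE: 'ExampleChannel'})
    print(ids[TVINFO_YOUTUBE])   # dict-style access -> 'ExampleChannel'
    print(list(ids.keys()))      # only ids that are actually set are yielded
    print(len(ids))              # 2; str(ids) renders each set id via tv_src_names
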
class TVInfoImageType(object):

@@ -227,10 +329,10 @@ class TVInfoImageSize(object):

class TVInfoImage(object):
    def __init__(self, image_type, sizes, img_id=None, main_image=False, type_str='', rating=None, votes=None,
                 lang=None, height=None, width=None, aspect_ratio=None):
                 lang=None, height=None, width=None, aspect_ratio=None, updated_at=None):
        self.img_id = img_id  # type: Optional[integer_types]
        self.image_type = image_type  # type: integer_types
        self.sizes = sizes  # type: Dict[int, AnyStr]
        self.sizes = sizes  # type: Union[TVInfoImageSize, Dict]
        self.type_str = type_str  # type: AnyStr
        self.main_image = main_image  # type: bool
        self.rating = rating  # type: Optional[Union[float, integer_types]]

@@ -239,6 +341,10 @@ class TVInfoImage(object):
        self.height = height  # type: Optional[integer_types]
        self.width = width  # type: Optional[integer_types]
        self.aspect_ratio = aspect_ratio  # type: Optional[Union[float, integer_types]]
        self.updated_at = updated_at  # type: Optional[integer_types]

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __str__(self):
        return '<TVInfoImage %s [%s]>' % (TVInfoImageType.reverse_str.get(self.image_type, 'unknown'),
@@ -248,13 +354,20 @@ class TVInfoImage(object):


class TVInfoNetwork(object):
    def __init__(self, name, n_id=None, country=None, country_code=None, timezone=None, stream=None):
    def __init__(self, name, n_id=None, country=None, country_code=None, timezone=None, stream=None, active_date=None,
                 inactive_date=None):
        # type: (AnyStr, integer_types, AnyStr, AnyStr, AnyStr, bool, AnyStr, AnyStr) -> None
        self.name = name  # type: AnyStr
        self.id = n_id  # type: Optional[integer_types]
        self.country = country  # type: Optional[AnyStr]
        self.country_code = country_code  # type: Optional[AnyStr]
        self.timezone = timezone  # type: Optional[AnyStr]
        self.stream = stream  # type: Optional[bool]
        self.active_date = active_date  # type: Optional[AnyStr]
        self.inactive_date = inactive_date  # type: Optional[AnyStr]

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __str__(self):
        return '<Network (%s)>' % ', '.join('%s' % s for s in [self.name, self.id, self.country, self.country_code,

@@ -267,7 +380,7 @@ class TVInfoShow(dict):
    """Holds a dict of seasons, and show data.
    """

    def __init__(self):
    def __init__(self, show_loaded=True):
        dict.__init__(self)
        self.lock = threading.RLock()
        self.data = {}  # type: Dict

@@ -283,7 +396,6 @@ class TVInfoShow(dict):
        self.ids = TVInfoIDs()  # type: TVInfoIDs
        self.social_ids = TVInfoSocialIDs()  # type: TVInfoSocialIDs
        self.slug = None  # type: Optional[AnyStr]
        self.seriesid = None  # type: integer_types
        self.seriesname = None  # type: Optional[AnyStr]
        self.aliases = []  # type: List[AnyStr]
        self.season = None  # type: integer_types

@@ -303,6 +415,7 @@ class TVInfoShow(dict):
        self.network_is_stream = None  # type: Optional[bool]
        self.runtime = None  # type: integer_types
        self.language = None  # type: Optional[AnyStr]
        self.spoken_languages = []  # type: List[string_types]
        self.official_site = None  # type: Optional[AnyStr]
        self.imdb_id = None  # type: Optional[AnyStr]
        self.zap2itid = None  # type: Optional[AnyStr]

@@ -317,7 +430,7 @@ class TVInfoShow(dict):
        self.contentrating = None  # type: Optional[AnyStr]
        self.rating = None  # type: Union[integer_types, float]
        self.status = None  # type: Optional[AnyStr]
        self.overview = None  # type: Optional[AnyStr]
        self.overview = ''  # type: AnyStr
        self.poster = None  # type: Optional[AnyStr]
        self.poster_thumb = None  # type: Optional[AnyStr]
        self.banner = None  # type: Optional[AnyStr]

@@ -332,6 +445,33 @@ class TVInfoShow(dict):
        self.vote_average = None  # type: Optional[Union[integer_types, float]]
        self.origin_countries = []  # type: List[AnyStr]
        self.requested_language = ''  # type: AnyStr
        self.alt_ep_numbering = {}  # type: Dict[Any, Dict[integer_types, Dict[integer_types, TVInfoEpisode]]]
        self.watcher_count = None  # type: integer_types
        self.play_count = None  # type: integer_types
        self.collected_count = None  # type: integer_types
        self.collector_count = None  # type: integer_types
        self.next_season_airdate = None  # type: Optional[string_types]
        # trailers dict containing: {language: trailer url}, 'any' for unknown language
        self.trailers = {}  # type: Dict[string_types, string_types]
        self.show_loaded = show_loaded  # type: bool
        self.load_method = None  # type: Optional[Callable]

    def load_data(self):
        if not self.show_loaded and self.id and isinstance(self.load_method, Callable):
            _new_show_data = self.load_method(self.id, load_actors=False)
            if isinstance(_new_show_data, TVInfoShow):
                self.__dict__.update(_new_show_data.__dict__)
                self.show_loaded = True

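`load_data` lets a lightweight result (e.g. from a show card list) hydrate itself on first use via the callable stored in `load_method`. A minimal sketch of the pattern; `fetch_full_show` here is a hypothetical stand-in for a TV info source's loader:

    def fetch_full_show(prodid, load_actors=False):
        # stand-in for a source's get_show(); returns a fully populated TVInfoShow
        full = TVInfoShow()
        full.id, full.seriesname = prodid, 'Example Show'
        return full

    stub = TVInfoShow(show_loaded=False)    # e.g. a bare search/card result
    stub.id, stub.load_method = 123, fetch_full_show
    stub.load_data()                        # copies the full object's __dict__ over the stub
    assert stub.show_loaded and 'Example Show' == stub.seriesname
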
    @property
    def seriesid(self):
        # type: (...) -> integer_types
        return self.id

    @seriesid.setter
    def seriesid(self, val):
        # type: (integer_types) -> None
        self.id = val

    def __str__(self):
        nr_seasons = len(self)

@@ -348,7 +488,7 @@ class TVInfoShow(dict):

        raise AttributeError

    def __getitem__(self, key, raise_error=True):
    def __getitem__(self, key):
        if isinstance(key, string_types) and key in self.__dict__:
            return self.__dict__[key]


@@ -360,7 +500,6 @@ class TVInfoShow(dict):
            # Non-numeric request is for show-data
            return dict.__getitem__(self.data, key)

        if raise_error:
        # Data wasn't found, raise appropriate error
        if isinstance(key, integer_types) or isinstance(key, string_types) and key.isdigit():
            # Episode number x was not found

@@ -370,8 +509,12 @@ class TVInfoShow(dict):
        # doesn't exist, so attribute error.
        raise BaseTVinfoAttributenotfound('Cannot find attribute %s' % (repr(key)))

    def get(self, __key, __default=None):
        return self.__getitem__(__key, raise_error=None is __default) or __default
    def get(self, __key, *args):
        try:
            return self.__getitem__(__key)
        except (BaseException, Exception):
            if 0 != len(args):
                return args[0]

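The reworked `get` now mirrors `dict.get`: the lookup error is swallowed and an optional positional default is returned, and, unlike the old `... or __default` form, a stored falsy value such as `''` or `0` is returned as-is instead of being masked by the default. A sketch of the behaviour (keys are illustrative):

    show = TVInfoShow()
    assert None is show.get('no_such_key')           # error swallowed, no default given
    assert 'n/a' == show.get('no_such_key', 'n/a')   # positional default, like dict.get
    show.seriesname = ''
    assert '' == show.get('seriesname', 'n/a')       # falsy stored values no longer masked
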
    def __deepcopy__(self, memo):
        cls = self.__class__

@@ -380,10 +523,14 @@ class TVInfoShow(dict):
        for k, v in self.__dict__.items():
            if 'lock' == k:
                setattr(result, k, threading.RLock())
            elif 'load_method' == k:
                setattr(result, k, None)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        for k, v in self.items():
            result[k] = copy.deepcopy(v, memo)
            if isinstance(k, integer_types):
                setattr(result[k], 'show', result)
        return result

    def __bool__(self):

@@ -417,8 +564,9 @@ class TVInfoShow(dict):

    def __getstate__(self):
        d = dict(self.__dict__)
        for d_a in ('lock', 'load_method'):
            try:
                del d['lock']
                del d[d_a]
            except (BaseException, Exception):
                pass
        return d

@@ -426,24 +574,25 @@ class TVInfoShow(dict):
    def __setstate__(self, d):
        self.__dict__ = d
        self.lock = threading.RLock()
        self.load_method = None

    __repr__ = __str__
    __nonzero__ = __bool__


class TVInfoSeason(dict):
    def __init__(self, show=None, **kwargs):
    def __init__(self, show=None, number=None, **kwargs):
        """The show attribute points to the parent show
        """
        super(TVInfoSeason, self).__init__(**kwargs)
        self.show = show  # type: TVInfoShow
        self.id = None  # type: integer_types
        self.number = None  # type: integer_types
        self.number = number  # type: integer_types
        self.name = None  # type: Optional[AnyStr]
        self.actors = []  # type: List[Dict]
        self.cast = CastList()  # type: Dict[integer_types, TVInfoCharacter]
        self.network = None  # type: Optional[AnyStr]
        self.network_id = None  # type: integer_types
        self.network_id = None  # type: Optional[integer_types]
        self.network_timezone = None  # type: Optional[AnyStr]
        self.network_country = None  # type: Optional[AnyStr]
        self.network_country_code = None  # type: Optional[AnyStr]

@@ -476,9 +625,12 @@ class TVInfoSeason(dict):
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # noinspection PyArgumentList
            setattr(result, k, copy.deepcopy(v, memo))
        for k, v in self.items():
            result[k] = copy.deepcopy(v, memo)
            if isinstance(k, integer_types):
                setattr(result[k], 'season', result)
        return result

    def search(self, term=None, key=None):

@@ -516,7 +668,7 @@ class TVInfoEpisode(dict):
        self.writers = []  # type: List[AnyStr]
        self.crew = CrewList()  # type: CrewList
        self.episodename = None  # type: Optional[AnyStr]
        self.overview = None  # type: Optional[AnyStr]
        self.overview = ''  # type: AnyStr
        self.language = {'episodeName': None, 'overview': None}  # type: Dict[AnyStr, Optional[AnyStr]]
        self.productioncode = None  # type: Optional[AnyStr]
        self.showurl = None  # type: Optional[AnyStr]

@@ -544,17 +696,21 @@ class TVInfoEpisode(dict):
        self.contentrating = None  # type: Optional[AnyStr]
        self.thumbadded = None  # type: Optional[AnyStr]
        self.rating = None  # type: Union[integer_types, float]
        self.vote_count = None  # type: integer_types
        self.siteratingcount = None  # type: integer_types
        self.show = show  # type: Optional[TVInfoShow]
        self.alt_nums = {}  # type: Dict[AnyStr, Dict[integer_types, integer_types]]
        self.finale_type = None  # type: Optional[integer_types]

    def __str__(self):
        show_name = (self.show and self.show.seriesname and '<Show %s> - ' % self.show.seriesname) or ''
        seasno, epno = int(getattr(self, 'seasonnumber', 0)), int(getattr(self, 'episodenumber', 0))
        seasno, epno = int(getattr(self, 'seasonnumber', 0) or 0), int(getattr(self, 'episodenumber', 0) or 0)
        epname = getattr(self, 'episodename', '')
        finale_str = (self.finale_type and ' (%s finale)' % final_types.get(self.finale_type).capitalize()) or ''
        if None is not epname:
            return '%s<Episode %02dx%02d - %r>' % (show_name, seasno, epno, epname)
            return '%s<Episode %02dx%02d - %r%s>' % (show_name, seasno, epno, epname, finale_str)
        else:
            return '%s<Episode %02dx%02d>' % (show_name, seasno, epno)
            return '%s<Episode %02dx%02d%s>' % (show_name, seasno, epno, finale_str)

    def __getattr__(self, key):
        if key in self:

@@ -572,6 +728,7 @@ class TVInfoEpisode(dict):
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # noinspection PyArgumentList
            setattr(result, k, copy.deepcopy(v, memo))
        for k, v in self.items():
            result[k] = copy.deepcopy(v, memo)

@@ -663,19 +820,19 @@ class PersonBase(dict):
    sortorder
    """
    def __init__(
            self,  # type:
            self,
            p_id=None,  # type: integer_types
            name=None,  # type: AnyStr
            image=None,  # type: AnyStr
            images=None,  # type: List[TVInfoImage]
            gender=None,  # type: int
            gender=None,  # type: integer_types
            bio=None,  # type: AnyStr
            birthdate=None,  # type: datetime.date
            deathdate=None,  # type: datetime.date
            country=None,  # type: AnyStr
            country_code=None,  # type: AnyStr
            country_timezone=None,  # type: AnyStr
            ids=None,  # type: Dict
            ids=None,  # type: TVInfoIDs
            thumb_url=None,  # type: AnyStr
            **kwargs  # type: Dict
    ):

@@ -692,7 +849,7 @@ class PersonBase(dict):
        self.country = country  # type: Optional[AnyStr]
        self.country_code = country_code  # type: Optional[AnyStr]
        self.country_timezone = country_timezone  # type: Optional[AnyStr]
        self.ids = ids or {}  # type: Dict[int, integer_types]
        self.ids = ids or TVInfoIDs()  # type: TVInfoIDs

    def calc_age(self, date=None):
        # type: (Optional[datetime.date]) -> Optional[int]

@@ -726,6 +883,7 @@ class PersonGenders(object):
    reverse = {v: k for k, v in iteritems(named)}
    tmdb_map = {0: unknown, 1: female, 2: male}
    imdb_map = {'female': female, 'male': male}
    tvdb_map = {0: unknown, 1: male, 2: female, 3: unknown}  # 3 is technically: other


class Crew(PersonBase):

@@ -749,21 +907,21 @@ class TVInfoPerson(PersonBase):
            image=None,  # type: Optional[AnyStr]
            images=None,  # type: List[TVInfoImage]
            thumb_url=None,  # type: AnyStr
            gender=None,  # type: int
            gender=None,  # type: integer_types
            bio=None,  # type: AnyStr
            birthdate=None,  # type: datetime.date
            deathdate=None,  # type: datetime.date
            country=None,  # type: AnyStr
            country_code=None,  # type: AnyStr
            country_timezone=None,  # type: AnyStr
            ids=None,  # type: Dict
            homepage=None,  # type: AnyStr
            social_ids=None,  # type: Dict
            ids=None,  # type: TVInfoIDs
            homepage=None,  # type: Optional[AnyStr]
            social_ids=None,  # type: TVInfoSocialIDs
            birthplace=None,  # type: AnyStr
            deathplace=None,  # type: AnyStr
            url=None,  # type: AnyStr
            characters=None,  # type: List[TVInfoCharacter]
            height=None,  # type: Union[integer_types, float]
            deathplace=None,  # type: AnyStr
            nicknames=None,  # type: Set[AnyStr]
            real_name=None,  # type: AnyStr
            akas=None,  # type: Set[AnyStr]

@@ -775,7 +933,7 @@ class TVInfoPerson(PersonBase):
            country_code=country_code, country_timezone=country_timezone, ids=ids, **kwargs)
        self.credits = []  # type: List
        self.homepage = homepage  # type: Optional[AnyStr]
        self.social_ids = social_ids or {}  # type: Dict
        self.social_ids = social_ids or TVInfoSocialIDs()  # type: TVInfoSocialIDs
        self.birthplace = birthplace  # type: Optional[AnyStr]
        self.deathplace = deathplace  # type: Optional[AnyStr]
        self.nicknames = nicknames or set()  # type: Set[AnyStr]

@@ -792,25 +950,30 @@ class TVInfoPerson(PersonBase):


class TVInfoCharacter(PersonBase):
    def __init__(self, person=None, voice=None, plays_self=None, regular=None, show=None, start_year=None,
                 end_year=None, **kwargs):
        # type: (List[TVInfoPerson], bool, bool, bool, TVInfoShow, int, int, Dict) -> None
        super(TVInfoCharacter, self).__init__(**kwargs)
    def __init__(self, person=None, voice=None, plays_self=None, regular=None, ti_show=None, start_year=None,
                 end_year=None, ids=None, name=None, episode_count=None, guest_episodes_numbers=None, **kwargs):
        # type: (List[TVInfoPerson], bool, bool, bool, TVInfoShow, int, int, TVInfoIDs, AnyStr, int, Dict[int, List[int]], ...) -> None
        super(TVInfoCharacter, self).__init__(ids=ids, **kwargs)
        self.person = person  # type: List[TVInfoPerson]
        self.voice = voice  # type: Optional[bool]
        self.plays_self = plays_self  # type: Optional[bool]
        self.regular = regular  # type: Optional[bool]
        self.show = show  # type: Optional[TVInfoShow]
        self.ti_show = ti_show  # type: Optional[TVInfoShow]
        self.start_year = start_year  # type: Optional[integer_types]
        self.end_year = end_year  # type: Optional[integer_types]
        self.name = name  # type: Optional[AnyStr]
        self.episode_count = episode_count  # type: Optional[int]
        self.guest_episodes_numbers = guest_episodes_numbers or {}  # type: Dict[int, List[int]]

    def __str__(self):
        pn = []
        char_type = ('', ' [Guest]')[False is self.regular]
        char_show = (None is not self.ti_show and ' [%s]' % self.ti_show.seriesname) or ''
        if None is not self.person:
            for p in self.person:
                if getattr(p, 'name', None):
                    pn.append(p.name)
        return '<Character "%s%s">' % (self.name, ('', ' - (%s)' % ', '.join(pn))[bool(pn)])
        return '<Character%s "%s%s%s">' % (char_type, self.name, ('', ' - (%s)' % ', '.join(pn))[bool(pn)], char_show)

    __repr__ = __str__

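Note: `char_show` is coerced to `''` above when `ti_show` is unset, so the literal `False` of the short-circuit `and` never leaks into the rendered name. Illustrative use of the new fields (values are made up):

    role = TVInfoCharacter(name='Jane Doe', regular=False, episode_count=2)
    print(role)  # <Character [Guest] "Jane Doe"> ; a ' [show name]' suffix appears once ti_show is set
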
@@ -848,16 +1011,31 @@ class RoleTypes(object):
    CrewShowrunner = 72
    CrewOther = 100

    reverse = {1: 'Main', 2: 'Recurring', 3: 'Guest', 4: 'Special Guest', 50: 'Director', 51: 'Writer', 52: 'Producer',
               53: 'Executive Producer', 60: 'Creator', 61: 'Editor', 62: 'Camera', 63: 'Music', 64: 'Stylist',
               65: 'Makeup', 66: 'Photography', 67: 'Sound', 68: 'Designer', 69: 'Developer', 70: 'Animation',
               71: 'Visual Effects', 100: 'Other'}
    reverse = {1: 'Main', 2: 'Recurring', 3: 'Guest', 4: 'Special Guest', 10: 'Host', 11: 'Host Guest',
               12: 'Presenter', 13: 'Presenter Guest', 14: 'Interviewer', 15: 'Interviewer Guest',
               16: 'Musical Guest', 50: 'Director', 51: 'Writer', 52: 'Producer', 53: 'Executive Producer',
               60: 'Creator', 61: 'Editor', 62: 'Camera', 63: 'Music', 64: 'Stylist', 65: 'Makeup',
               66: 'Photography', 67: 'Sound', 68: 'Designer', 69: 'Developer', 70: 'Animation',
               71: 'Visual Effects', 72: 'Showrunner', 100: 'Other'}
    crew_limit = 50

    # just a helper to generate the reverse data
    # def __init__(self):
    #     import re
    #     {value: re.sub(r'([a-z])([A-Z])', r'\1 \2', name.replace('Actor', '').replace('Crew', ''))
    #      for name, value in iteritems(vars(RoleTypes)) if not name.startswith('_')
    #      and name not in ('reverse', 'crew_limit')}


crew_type_names = {c.lower(): v for v, c in iteritems(RoleTypes.reverse) if v >= RoleTypes.crew_limit}

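The commented helper above shows how `reverse` could be regenerated from the class attributes: the `Actor`/`Crew` prefixes are stripped and camel-case names are split on case boundaries. A runnable version of that idea, as a standalone sketch rather than part of the module:

    import re

    def role_reverse_map(cls):
        # e.g. 'CrewExecutiveProducer' -> 'Executive Producer', 'ActorGuest' -> 'Guest'
        return {value: re.sub(r'([a-z])([A-Z])', r'\1 \2', name.replace('Actor', '').replace('Crew', ''))
                for name, value in vars(cls).items()
                if not name.startswith('_') and name not in ('reverse', 'crew_limit')}
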
class TVInfoSeasonTypes(object):
    default = 'default'
    official = 'official'
    dvd = 'dvd'


class TVInfoBase(object):
    supported_id_searches = []
    supported_person_id_searches = []

@@ -869,12 +1047,12 @@ class TVInfoBase(object):
    reverse_map_languages = {v: k for k, v in iteritems(map_languages)}

    def __init__(self, banners=False, posters=False, seasons=False, seasonwides=False, fanart=False, actors=False,
                 *args, **kwargs):
                 dvdorder=False, *args, **kwargs):
        global TVInfoShowContainer
        if self.__class__.__name__ not in TVInfoShowContainer:
            TVInfoShowContainer[self.__class__.__name__] = ShowContainer()
        self.shows = TVInfoShowContainer[self.__class__.__name__]  # type: ShowContainer[integer_types, TVInfoShow]
        self.shows.cleanup_old()
        self.ti_shows = TVInfoShowContainer[self.__class__.__name__]  # type: ShowContainer[integer_types, TVInfoShow]
        self.ti_shows.cleanup_old()
        self.lang = None  # type: Optional[AnyStr]
        self.corrections = {}  # type: Dict
        self.show_not_found = False  # type: bool

@@ -903,6 +1081,7 @@ class TVInfoBase(object):
            'fanart_enabled': fanart,
            'actors_enabled': actors,
            'cache_search': kwargs.get('cache_search'),
            'dvdorder': dvdorder,
        }  # type: Dict[AnyStr, Any]

    def _must_load_data(self, sid, load_episodes, banners, posters, seasons, seasonwides, fanart, actors, lang):

@@ -920,10 +1099,10 @@ class TVInfoBase(object):
        :param actors: should load actors
        :param lang: requested language
        """
        if sid not in self.shows or None is self.shows[sid].id or \
                (load_episodes and not getattr(self.shows[sid], 'ep_loaded', False)):
        if sid not in self.ti_shows or None is self.ti_shows[sid].id or \
                (load_episodes and not getattr(self.ti_shows[sid], 'ep_loaded', False)):
            return True
        _show = self.shows[sid]  # type: TVInfoShow
        _show = self.ti_shows[sid]  # type: TVInfoShow
        if _show.requested_language != lang:
            _show.ep_loaded = _show.poster_loaded = _show.banner_loaded = _show.actors_loaded = _show.fanart_loaded = \
                _show.seasonwide_images_loaded = _show.season_images_loaded = False

@@ -1079,8 +1258,9 @@ class TVInfoBase(object):
            actors=False,  # type: bool
            old_call=False,  # type: bool
            language=None,  # type: AnyStr
            **kwargs  # type: Optional[Any]
    ):  # type: (...) -> Optional[TVInfoShow]
            # **kwargs  # type: dict
    ):
        # type: (...) -> Optional[TVInfoShow]
        """
        get data for show id
        :param show_id: id of show

@@ -1100,33 +1280,48 @@ class TVInfoBase(object):
        self.config.update({'banners_enabled': banners, 'posters_enabled': posters, 'seasons_enabled': seasons,
                            'seasonwides_enabled': seasonwides, 'fanart_enabled': fanart, 'actors_enabled': actors,
                            'language': language or 'en'})
        self.shows.lock.acquire()
        self.ti_shows.lock.acquire()
        try:
            if show_id not in self.shows:
                self.shows[show_id] = TVInfoShow()  # type: TVInfoShow
            with self.shows[show_id].lock:
                self.shows.lock.release()
            if show_id not in self.ti_shows:
                self.ti_shows[show_id] = TVInfoShow()  # type: TVInfoShow
            with self.ti_shows[show_id].lock:
                self.ti_shows.lock.release()
                try:
                    if self._must_load_data(show_id, load_episodes, banners, posters, seasons, seasonwides, fanart,
                                            actors, self.config['language']):
                        self.shows[show_id].requested_language = self.config['language']
                        self.ti_shows[show_id].requested_language = self.config['language']
                        self._get_show_data(show_id, self.map_languages.get(self.config['language'],
                                                                            self.config['language']),
                                            load_episodes, banners, posters, seasons, seasonwides, fanart, actors)
                        if None is self.shows[show_id].id:
                            with self.shows.lock:
                                del self.shows[show_id]
                    return None if show_id not in self.shows else copy.deepcopy(self.shows[show_id])
                        if None is self.ti_shows[show_id].id:
                            with self.ti_shows.lock:
                                del self.ti_shows[show_id]
                    if show_id not in self.ti_shows:
                        return None
                    else:
                        show_copy = copy.deepcopy(self.ti_shows[show_id])  # type: TVInfoShow
                        # provide old call compatibility for dvd order
                        if self.config.get('dvdorder') and TVInfoSeasonTypes.dvd in show_copy.alt_ep_numbering:
                            org_seasons, dvd_seasons = list(show_copy), \
                                list(show_copy.alt_ep_numbering[TVInfoSeasonTypes.dvd])
                            for r_season in set(org_seasons) - set(dvd_seasons):
                                try:
                                    del show_copy[r_season]
                                except (BaseException, Exception):
                                    continue
                            for ti_season in dvd_seasons:
                                show_copy[ti_season] = show_copy.alt_ep_numbering[TVInfoSeasonTypes.dvd][ti_season]
                        return show_copy
                finally:
                    try:
                        if None is self.shows[show_id].id:
                            with self.shows.lock:
                                del self.shows[show_id]
                        if None is self.ti_shows[show_id].id:
                            with self.ti_shows.lock:
                                del self.ti_shows[show_id]
                    except (BaseException, Exception):
                        pass
        finally:
            try:
                self.shows.lock.release()
                self.ti_shows.lock.release()
            except RuntimeError:
                pass
        if not old_call and None is not self._old_config:
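When `dvdorder` is set, the deep copy returned by `get_show` swaps aired seasons for the DVD numbering kept under `alt_ep_numbering[TVInfoSeasonTypes.dvd]`: aired-only seasons are dropped from the copy and the DVD seasons are mapped in. A hedged call sketch; `SomeTVInfoSource` is a placeholder for any concrete `TVInfoBase` subclass and the id is illustrative:

    t = SomeTVInfoSource(dvdorder=True)
    show = t.get_show(12345, load_episodes=True)
    # show[1][1] now addresses episode 1x01 in DVD numbering, preserving the old call behaviour
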
@@ -1134,12 +1329,13 @@ class TVInfoBase(object):
            self._old_config = None

    # noinspection PyMethodMayBeStatic
    def _search_show(self, name=None, ids=None, **kwargs):
        # type: (Union[AnyStr, List[AnyStr]], Dict[integer_types, integer_types], Optional[Any]) -> List[Dict]
    def _search_show(self, name=None, ids=None, lang=None, **kwargs):
        # type: (Union[AnyStr, List[AnyStr]], Dict[integer_types, integer_types], Optional[string_types], Optional[Any]) -> List[Dict]
        """
        internal search function to find shows, should be overridden in subclasses
        :param name: name to search for
        :param ids: dict of ids {tvid: prodid} to search for
        :param lang: language code
        """
        return []


@@ -1154,15 +1350,26 @@ class TVInfoBase(object):
            return names
        return name

    def search_show(self, name=None, ids=None, **kwargs):
        # type: (Union[AnyStr, List[AnyStr]], Dict[integer_types, integer_types], Optional[Any]) -> List[Dict]
    def search_show(
            self,
            name=None,  # type: Union[AnyStr, List[AnyStr]]
            ids=None,  # type: Dict[integer_types, integer_types]
            lang=None,  # type: Optional[string_types]
            # **kwargs  # type: Optional[Any]
    ):
        # type: (...) -> List[Dict]
        """
        search for series with name(s) or ids

        :param name: series name or list of names to search for
        :param ids: dict of ids {tvid: prodid} to search for
        :param lang: language code
        :return: combined list of series results
        """
        if None is lang:
            if self.config.get('language'):
                lang = self.config['language']
        lang = self.map_languages.get(lang, lang)
        if not name and not ids:
            log.debug('Nothing to search')
            raise BaseTVinfoShownotfound('Nothing to search')

@@ -1171,14 +1378,15 @@ class TVInfoBase(object):
            if not name and not any(1 for i in ids if i in self.supported_id_searches):
                log.debug('Id type not supported')
                raise BaseTVinfoShownotfound('Id type not supported')
            selected_series = self._search_show(name=name, ids=ids)
            selected_series = self._search_show(name=name, ids=ids, lang=lang)
        elif name:
            selected_series = self._search_show(name)
            selected_series = self._search_show(name, lang=lang)
        if isinstance(selected_series, dict):
            selected_series = [selected_series]
        if not isinstance(selected_series, list) or 0 == len(selected_series):
            log.debug('Series result returned zero')
            raise BaseTVinfoShownotfound('Show-name search returned zero results (cannot find show on TVDB)')
            raise BaseTVinfoShownotfound('Show-name search returned zero results (cannot find show on %s)' %
                                         self.__class__.__name__)
        return selected_series

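Typical use of the public search entry point, sketched with a placeholder subclass and illustrative values:

    t = SomeTVInfoSource()                                   # any TVInfoBase subclass
    results = t.search_show(name='Example Show', lang='en')  # name search
    results = t.search_show(ids={TVINFO_IMDB: 'tt0000000'})  # id search, if the source supports it
    # both raise BaseTVinfoShownotfound when nothing matches or the id type is unsupported
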
    def _set_item(self, sid, seas, ep, attrib, value):

@@ -1197,41 +1405,41 @@ class TVInfoBase(object):
        calls __getitem__ on tvinfo[1], there is no way to check if
        tvinfo.__dict__ should have a key "1" before we auto-create it
        """
        # if sid not in self.shows:
        #     self.shows[sid] = TVInfoShow()
        if seas not in self.shows[sid]:
            self.shows[sid][seas] = TVInfoSeason(show=self.shows[sid])
            self.shows[sid][seas].number = seas
        if ep not in self.shows[sid][seas]:
            self.shows[sid][seas][ep] = TVInfoEpisode(season=self.shows[sid][seas], show=self.shows[sid])
        # if sid not in self.ti_shows:
        #     self.ti_shows[sid] = TVInfoShow()
        if seas not in self.ti_shows[sid]:
            self.ti_shows[sid][seas] = TVInfoSeason(show=self.ti_shows[sid])
            self.ti_shows[sid][seas].number = seas
        if ep not in self.ti_shows[sid][seas]:
            self.ti_shows[sid][seas][ep] = TVInfoEpisode(season=self.ti_shows[sid][seas], show=self.ti_shows[sid])
        if attrib not in ('cast', 'crew'):
            self.shows[sid][seas][ep][attrib] = value
        self.shows[sid][seas][ep].__dict__[attrib] = value
            self.ti_shows[sid][seas][ep][attrib] = value
        self.ti_shows[sid][seas][ep].__dict__[attrib] = value

    def _set_show_data(self, sid, key, value, add=False):
        # type: (integer_types, Any, Any, bool) -> None
        """Sets self.shows[sid] to a new Show instance, or sets the data
        """Sets self.ti_shows[sid] to a new Show instance, or sets the data
        """
        # if sid not in self.shows:
        #     self.shows[sid] = TVInfoShow()
        # if sid not in self.ti_shows:
        #     self.ti_shows[sid] = TVInfoShow()
        if key not in ('cast', 'crew'):
            if add and isinstance(self.shows[sid].data, dict) and key in self.shows[sid].data:
                self.shows[sid].data[key].update(value)
            if add and isinstance(self.ti_shows[sid].data, dict) and key in self.ti_shows[sid].data:
                self.ti_shows[sid].data[key].update(value)
            else:
                self.shows[sid].data[key] = value
                self.ti_shows[sid].data[key] = value
            if '_banners' == key:
                p_key = 'banners'
            else:
                p_key = key
            if add and key in self.shows[sid].__dict__ and isinstance(self.shows[sid].__dict__[p_key], dict):
                self.shows[sid].__dict__[p_key].update(self.shows[sid].data[key])
            if add and key in self.ti_shows[sid].__dict__ and isinstance(self.ti_shows[sid].__dict__[p_key], dict):
                self.ti_shows[sid].__dict__[p_key].update(self.ti_shows[sid].data[key])
            else:
                self.shows[sid].__dict__[p_key] = self.shows[sid].data[key]
                self.ti_shows[sid].__dict__[p_key] = self.ti_shows[sid].data[key]
        else:
            if add and key in self.shows[sid].__dict__ and isinstance(self.shows[sid].__dict__[key], dict):
                self.shows[sid].__dict__[key].update(value)
            if add and key in self.ti_shows[sid].__dict__ and isinstance(self.ti_shows[sid].__dict__[key], dict):
                self.ti_shows[sid].__dict__[key].update(value)
            else:
                self.shows[sid].__dict__[key] = value
                self.ti_shows[sid].__dict__[key] = value

    def get_updated_shows(self):
        # type: (...) -> Dict[integer_types, integer_types]

@@ -1241,6 +1449,24 @@ class TVInfoBase(object):
        """
        return {}

    def get_similar(self, tvid, result_count=100, **kwargs):
        # type: (integer_types, int, Any) -> List[TVInfoShow]
        """
        return list of similar shows to given id
        :param tvid: id to give similar shows for
        :param result_count: count of results requested
        """
        return []

    def get_recommended_for_show(self, tvid, result_count=100, **kwargs):
        # type: (integer_types, int, Any) -> List[TVInfoShow]
        """
        list of recommended shows for the provided tv id
        :param tvid: id to find recommended shows for
        :param result_count: result count to return
        """
        return []

    def get_trending(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """

@@ -1259,16 +1485,30 @@ class TVInfoBase(object):
    def get_top_rated(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get all latest shows
        get top rated shows
        """
        return []

    def get_new_shows(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get new shows
        """
        return []

    def get_new_seasons(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get new seasons
        """
        return []

    def discover(self, result_count=100, get_extra_images=False, **kwargs):
        # type: (...) -> List[TVInfoEpisode]
        # type: (...) -> List[TVInfoShow]
        return []

    def get_premieres(self, **kwargs):
        # type: (...) -> List[TVInfoEpisode]
        # type: (...) -> List[TVInfoShow]
        """
        get all premiering shows
        """

@@ -1281,6 +1521,93 @@ class TVInfoBase(object):
        """
        return []

    def get_most_played(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get most played shows
        :param result_count: how many results are supposed to be returned
        """
        return []

    def get_most_watched(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get most watched shows
        :param result_count: how many results are supposed to be returned
        """
        return []

    def get_most_collected(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get most collected shows
        :param result_count: how many results are supposed to be returned
        """
        return []

    def get_recommended(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get most recommended shows
        :param result_count: how many results are supposed to be returned
        """
        return []

    def get_recommended_for_account(self, account, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get recommended shows for account
        :param account: account to get recommendations for
        :param result_count: how many results are supposed to be returned
        """
        return []

    def hide_recommended_for_account(self, account, show_ids, **kwargs):
        # type: (integer_types, List[integer_types], Any) -> List[integer_types]
        """
        hide recommended show for account
        :param account: account to get recommendations for
        :param show_ids: list of show_ids to no longer recommend for account
        :return: list of added ids
        """
        return []

    def unhide_recommended_for_account(self, account, show_ids, **kwargs):
        # type: (integer_types, List[integer_types], Any) -> List[integer_types]
        """
        unhide recommended show for account
        :param account: account to get recommendations for
        :param show_ids: list of show_ids to be included in possible recommend for account
        :return: list of removed ids
        """
        return []

    def list_hidden_recommended_for_account(self, account, **kwargs):
        # type: (integer_types, Any) -> List[TVInfoShow]
        """
        list hidden recommended show for account
        :param account: account to get recommendations for
        :return: list of hidden shows
        """
        return []

    def get_watchlisted_for_account(self, account, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get most watchlisted shows for account
        :param account: account to get recommendations for
        :param result_count: how many results are supposed to be returned
        """
        return []

    def get_anticipated(self, result_count=100, **kwargs):
        # type: (...) -> List[TVInfoShow]
        """
        get anticipated shows
        :param result_count: how many results are supposed to be returned
        """
        return []

    def __getitem__(self, item):
        # type: (Union[AnyStr, integer_types, Tuple[integer_types, bool]]) -> Union[TVInfoShow, List[Dict], None]
        """Legacy handler (use get_show or search_show instead)

@@ -1322,6 +1649,7 @@ class TVInfoBase(object):

        msg_success = 'Treating image as %s with extracted aspect ratio'
        # most posters are around 0.68 width/height ratio (eg. 680/1000)
        # noinspection DuplicatedCode
        if 0.55 <= img_ratio <= 0.8:
            log.debug(msg_success % 'poster')
            return TVInfoImageType.poster

@@ -1359,6 +1687,6 @@ class TVInfoBase(object):
        return self._supported_languages or []

    def __str__(self):
        return '<TVInfo(%s) (containing: %s)>' % (self.__class__.__name__, text_type(self.shows))
        return '<TVInfo(%s) (containing: %s)>' % (self.__class__.__name__, text_type(self.ti_shows))

    __repr__ = __str__


@@ -33,7 +33,7 @@ class BaseTVinfoShownotfound(BaseTVinfoError):
    pass


class BaseTVinfoSeasonnotfound(BaseTVinfoError):
class BaseTVinfoSeasonnotfound(BaseTVinfoError, AttributeError, KeyError):
    """Season cannot be found
    """
    pass

@@ -45,7 +45,7 @@ class BaseTVinfoEpisodenotfound(BaseTVinfoError):
    pass


class BaseTVinfoAttributenotfound(BaseTVinfoError):
class BaseTVinfoAttributenotfound(BaseTVinfoError, AttributeError, KeyError):
    """Raised if an episode does not have the requested
    attribute (such as an episode name)
    """
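Adding `AttributeError` and `KeyError` as extra bases means `except` clauses written for plain attribute or key access now also catch these library-specific errors. For instance (sketch):

    try:
        _ = show['no_such_attribute']   # raises BaseTVinfoAttributenotfound
    except KeyError:
        pass                            # caught here, since the error is now also a KeyError
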
@@ -614,10 +614,12 @@ else:
    TRAKT_PIN_URL = 'https://trakt.tv/pin/6314'
    TRAKT_BASE_URL = 'https://api.trakt.tv/'

IMDB_MRU = ''
MC_MRU = ''
NE_MRU = ''
TMDB_MRU = ''
TVC_MRU = ''
TVM_MRU = ''
NE_MRU = ''

COOKIE_SECRET = b64encodestring(uuid.uuid4().bytes + uuid.uuid4().bytes)

@@ -765,7 +767,7 @@ def init_stage_1(console_logging):
    global USE_TRAKT, TRAKT_CONNECTED_ACCOUNT, TRAKT_ACCOUNTS, TRAKT_MRU, TRAKT_VERIFY, \
        TRAKT_USE_WATCHLIST, TRAKT_REMOVE_WATCHLIST, TRAKT_TIMEOUT, TRAKT_METHOD_ADD, TRAKT_START_PAUSED, \
        TRAKT_SYNC, TRAKT_DEFAULT_INDEXER, TRAKT_REMOVE_SERIESLIST, TRAKT_UPDATE_COLLECTION, \
        MC_MRU, TVC_MRU, TVM_MRU, NE_MRU, \
        MC_MRU, NE_MRU, TMDB_MRU, TVC_MRU, TVM_MRU, \
        USE_SLACK, SLACK_NOTIFY_ONSNATCH, SLACK_NOTIFY_ONDOWNLOAD, SLACK_NOTIFY_ONSUBTITLEDOWNLOAD, \
        SLACK_CHANNEL, SLACK_AS_AUTHED, SLACK_BOT_NAME, SLACK_ICON_URL, SLACK_ACCESS_TOKEN, \
        USE_DISCORD, DISCORD_NOTIFY_ONSNATCH, DISCORD_NOTIFY_ONDOWNLOAD, \

@@ -1203,9 +1205,10 @@ def init_stage_1(console_logging):
    TRAKT_MRU = check_setting_str(CFG, 'Trakt', 'trakt_mru', '')

    MC_MRU = check_setting_str(CFG, 'Metacritic', 'mc_mru', '')
    NE_MRU = check_setting_str(CFG, 'NextEpisode', 'ne_mru', '')
    TMDB_MRU = check_setting_str(CFG, 'TMDB', 'tmdb_mru', '')
    TVC_MRU = check_setting_str(CFG, 'TVCalendar', 'tvc_mru', '')
    TVM_MRU = check_setting_str(CFG, 'TVmaze', 'tvm_mru', '')
    NE_MRU = check_setting_str(CFG, 'NextEpisode', 'ne_mru', '')

    USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0))
    PYTIVO_HOST = check_setting_str(CFG, 'pyTivo', 'pytivo_host', '')

@@ -1713,7 +1716,7 @@ def init_stage_2():
    background_mapping_task = threading.Thread(name='MAPPINGUPDATES', target=indexermapper.load_mapped_ids,
                                               kwargs={'load_all': True})

    MEMCACHE['history_tab_limit'] = 11
    MEMCACHE['history_tab_limit'] = 13
    MEMCACHE['history_tab'] = History.menu_tab(MEMCACHE['history_tab_limit'])

    try:

@@ -2241,15 +2244,18 @@ def save_config():
        ('Metacritic', [
            ('mru', MC_MRU)
        ]),
        ('NextEpisode', [
            ('mru', NE_MRU)
        ]),
        ('TMDB', [
            ('mru', TMDB_MRU)
        ]),
        ('TVCalendar', [
            ('mru', TVC_MRU)
        ]),
        ('TVmaze', [
            ('mru', TVM_MRU)
        ]),
        ('NextEpisode', [
            ('mru', NE_MRU)
        ]),
        ('Slack', [
            ('use_%s', int(USE_SLACK)),
            ('channel', SLACK_CHANNEL),

@@ -290,7 +290,10 @@ def search_infosrc_for_show_id(reg_show_name, tvid=None, prodid=None, ui=None):
            logger.debug('Trying to find %s on %s' % (cur_name, sickgear.TVInfoAPI(cur_tvid).name))

            try:
                show_info_list = t[prodid] if prodid else t[cur_name]
                if prodid:
                    show_info_list = t.get_show(prodid)
                else:
                    show_info_list = t.search_show(cur_name)
                show_info_list = show_info_list if isinstance(show_info_list, list) else [show_info_list]
            except (BaseException, Exception):
                continue

@@ -989,7 +992,7 @@ def validate_show(show_obj, season=None, episode=None):
        if season is None and episode is None:
            return t

        return t[show_obj.prodid][season][episode]
        return t.get_show(show_obj.prodid, language=show_obj.lang)[season][episode]
    except (BaseTVinfoEpisodenotfound, BaseTVinfoSeasonnotfound, TypeError):
        pass


@@ -7,7 +7,8 @@ from lib.api_imdb.imdb_api import IMDbIndexer
from lib.tvinfo_base import (
    TVINFO_FACEBOOK, TVINFO_INSTAGRAM, TVINFO_TWITTER, TVINFO_WIKIPEDIA,
    TVINFO_IMDB, TVINFO_TMDB, TVINFO_TRAKT, TVINFO_TVDB, TVINFO_TVMAZE, TVINFO_TVRAGE,
    TVINFO_TRAKT_SLUG, TVINFO_TVDB_SLUG
    TVINFO_TRAKT_SLUG, TVINFO_TVDB_SLUG, TVINFO_TIKTOK, TVINFO_WIKIDATA, TVINFO_LINKEDIN, TVINFO_FANSITE,
    TVINFO_REDDIT, TVINFO_YOUTUBE
)

init_config = {

@@ -134,6 +135,72 @@ tvinfo_config = {
        show_url=None,
        people_only=True,
        icon='wikipedia16.png'
    ),
    TVINFO_TIKTOK: dict(
        id=TVINFO_TIKTOK,
        name='TikTok',
        module=None,
        active=False,
        mapped_only=True,
        people_url='https://www.tiktok.com/@%s',
        show_url=None,
        people_only=True,
        icon='tiktok16.png'
    ),
    TVINFO_WIKIDATA: dict(
        id=TVINFO_WIKIDATA,
        name='Wikidata',
        module=None,
        active=False,
        mapped_only=True,
        people_url='https://www.wikidata.org/wiki/%s',
        show_url=None,
        people_only=True,
        icon='wikidata16.png'
    ),
    TVINFO_REDDIT: dict(
        id=TVINFO_REDDIT,
        name='Reddit',
        module=None,
        active=False,
        mapped_only=True,
        people_url='http://www.reddit.com/r/%s',
        show_url=None,
        people_only=True,
        icon='reddit16.png'
    ),
    TVINFO_YOUTUBE: dict(
        id=TVINFO_YOUTUBE,
        name='Youtube',
        module=None,
        active=False,
        mapped_only=True,
        people_url='https://www.youtube.com/c/%s',
        show_url=None,
        people_only=True,
        icon='youtube16.png'
    ),
    TVINFO_FANSITE: dict(
        id=TVINFO_FANSITE,
        name='Fansite',
        module=None,
        active=False,
        mapped_only=True,
        people_url='%s',
        show_url=None,
        people_only=True,
        icon='_placeholder16.png'
    ),
    TVINFO_LINKEDIN: dict(
        id=TVINFO_LINKEDIN,
        name='Linkedin',
        module=None,
        active=False,
        mapped_only=True,
        people_url='https://www.linkedin.com/in/%s',
        show_url=None,
        people_only=True,
        icon='linkedin16.png'
    )
}

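Each socials entry is mapped-only: there is no API module, just a `people_url` template that the UI fills with the stored id. A sketch of how such a template expands (the handle is illustrative):

    cfg = tvinfo_config[TVINFO_YOUTUBE]
    link = cfg['people_url'] % 'ExampleChannel'  # -> 'https://www.youtube.com/c/ExampleChannel'
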
@@ -854,7 +854,7 @@ class GenericMetadata(object):

            t = sickgear.TVInfoAPI(tv_id).setup(**tvinfo_config)
            return t.get_show((show_obj.ids[tv_id]['id'], show_obj.prodid)[tv_src == show_obj.tvid],
                              load_episodes=False, banners=True, posters=True, fanart=True, language=show_obj.lang)
                              load_episodes=False, banners=True, posters=True, fanart=True, language=show_lang)
        except (BaseTVinfoError, IOError) as e:
            logger.warning(f'Unable to look up show on {sickgear.TVInfoAPI(tv_id).name},'
                           f' not downloading images: {ex(e)}')


@@ -123,7 +123,7 @@ class KODIMetadata(generic.GenericMetadata):
        tv_node = etree.Element('tvshow')

        try:
            show_info = t.get_show(show_obj.prodid, language=show_obj.lang)
            show_info = t.get_show(show_id, language=show_obj.lang)
        except BaseTVinfoShownotfound as e:
            logger.error(f'Unable to find show with id {show_id} on {sickgear.TVInfoAPI(show_obj.tvid).name},'
                         f' skipping it')


@@ -375,7 +375,8 @@ class NameParser(object):

                t = sickgear.TVInfoAPI(show_obj.tvid).setup(**tvinfo_config)

                ep_obj = t[show_obj.prodid].aired_on(best_result.air_date)[0]
                ep_obj = t.get_show(show_obj.prodid, language=show_obj.lang).aired_on(
                    best_result.air_date)[0]

                season_number = int(ep_obj['seasonnumber'])
                episode_numbers = [int(ep_obj['episodenumber'])]


@@ -971,13 +971,15 @@ class QueueItemAdd(ShowQueueItem):
        try:

            tvinfo_config = sickgear.TVInfoAPI(self.tvid).api_params.copy()
            kw = {}
            if self.lang:
                tvinfo_config['language'] = self.lang
                kw = {'language': self.lang}

            logger.log(f'{sickgear.TVInfoAPI(self.tvid).name}: {repr(tvinfo_config)}')

            t = sickgear.TVInfoAPI(self.tvid).setup(**tvinfo_config)
            s = t.get_show(self.prodid, load_episodes=False, language=self.lang)
            s = t.get_show(self.prodid, load_episodes=False, **kw)

            if getattr(t, 'show_not_found', False):
                logger.error(f'Show {self.show_name} was not found on {sickgear.TVInfoAPI(self.tvid).name},'

@@ -1676,7 +1678,7 @@ class QueueItemSwitchSource(ShowQueueItem):
        tvinfo_config['dvdorder'] = 0 != self.show_obj._dvdorder
        t = sickgear.TVInfoAPI(self.new_tvid).setup(**tvinfo_config)
        try:
            td = t.get_show(show_id=new_prodid, actors=True)
            td = t.get_show(show_id=new_prodid, actors=True, language=self.show_obj._lang)
        except (BaseException, Exception):
            td = None
        if not self.force_id:

@@ -1684,7 +1686,7 @@ class QueueItemSwitchSource(ShowQueueItem):
            if new_prodid != self.show_obj.ids.get(self.new_tvid, {}).get('id') is not None:
                new_prodid = self.show_obj.ids.get(self.new_tvid, {}).get('id')
                try:
                    td = t.get_show(show_id=new_prodid, actors=True, language=self.show_obj.lang)
                    td = t.get_show(show_id=new_prodid, actors=True, language=self.show_obj._lang)
                except (BaseException, Exception):
                    td = None
                logger.warning(f'Failed to get new tv show id ({new_prodid})'

sickgear/tv.py (637 changed lines not shown)

@@ -3365,7 +3365,7 @@ class CMD_SickGearShowAddExisting(ApiCall):
        t = sickgear.TVInfoAPI(self.tvid).setup(**lINDEXER_API_PARMS)

        try:
            myShow = t[int(self.prodid), False]
            myShow = t.get_show(self.prodid, load_episodes=False)
        except BaseTVinfoError as e:
            self.log(f'Unable to find show with id {self.tvid}', logger.WARNING)
            return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer")

@@ -3528,7 +3528,7 @@ class CMD_SickGearShowAddNew(ApiCall):
        t = sickgear.TVInfoAPI(self.tvid).setup(**lINDEXER_API_PARMS)

        try:
            myShow = t[int(self.prodid), False]
            myShow = t.get_show(self.prodid, load_episodes=False)
        except BaseTVinfoError as e:
            self.log(f'Unable to find show with id {self.tvid}', logger.WARNING)
            return _responds(RESULT_FAILURE, msg="Unable to retrieve information from indexer")

@@ -4631,24 +4631,22 @@ class CMD_SickGearShowsBrowseTrakt(ApiCall):

    def run(self):
        """ browse trakt shows in sickgear """
        urls = {'anticipated': 'shows/anticipated?limit=%s&' % 100,
                'newshows': '/calendars/all/shows/new/%s/%s?' % (SGDatetime.sbfdate(
                    dt=datetime.datetime.now() + datetime.timedelta(days=-16), d_preset='%Y-%m-%d'), 32),
                'newseasons': '/calendars/all/shows/premieres/%s/%s?' % (SGDatetime.sbfdate(
                    dt=datetime.datetime.now() + datetime.timedelta(days=-16), d_preset='%Y-%m-%d'), 32),
                'popular': 'shows/popular?limit=%s&' % 100,
                'trending': 'shows/trending?limit=%s&' % 100,
                'recommended': 'recommendations/shows?limit=%s&' % 100,
                }
        func = dict(anticipated='get_anticipated',
                    newshows='get_new_shows',
                    newseasons='get_new_seasons',
                    popular='get_popular',
                    trending='get_trending')
        kwargs = {}
        if self.type in ('recommended', 'watchlist'):
            if not self.account:
                return _responds(RESULT_FAILURE, msg='Need Trakt account')
            kwargs['send_oauth'] = self.account
            urls['watchlist'] = 'users/%s/watchlist/shows?limit=%s&' \
                                % (sickgear.TRAKT_ACCOUNTS[self.account].slug, 100)
            func.update(dict(recommended='get_recommended_for_account',
                             watchlist='get_watchlisted_for_account'))
            kwargs.update(dict(account=self.account, ignore_collected=True))
            if self.type in ('recommended',):
                kwargs.update(dict(ignore_watchlisted=True))
        try:
            data, oldest, newest = AddShows.get_trakt_data(urls[self.type], **kwargs)
            data, oldest, newest = AddShows.get_trakt_data(func[self.type], **kwargs)
        except Exception as e:
            return _responds(RESULT_FAILURE, msg=ex(e))
        return _responds(RESULT_SUCCESS, data)
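The endpoint now passes a method name rather than a hand-built Trakt URL, and `AddShows.get_trakt_data` resolves that name against the configured Trakt source. A minimal sketch of that name-based dispatch; this is illustrative, not the actual helper (which also returns the oldest/newest dates alongside the data):

    def get_trakt_data(func_name, **kwargs):
        source = TraktIndexer()             # placeholder for the configured Trakt TV info source
        fetch = getattr(source, func_name)  # e.g. 'get_anticipated' -> bound method
        return fetch(**kwargs)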