Merge branch 'release/0.4.0'

adam 2014-12-04 18:52:44 +08:00
commit 8bc6e7bbea
81 changed files with 5740 additions and 5537 deletions


@@ -1,7 +1,63 @@
### 0.4.0 (2014-12-04 10:50:00 UTC)
* Change footer stats to not add newlines when copy/pasting from them
* Remove redundant references from Config/Help & Info
* Fix poster preview on small poster layout
* Change overhaul Config/Anime to be in line with General Configuration
* Change descriptions and layout on Config/Anime page
* Remove output of source code line when warnings highlight libraries not used with IMDb
* Add dropdown on Add Trending Shows to display all shows, shows not in library, or shows in library
* Change Help and Info icon sprites to color and text of Arguments if unused
* Change sharper looking heart image on the Add Show page
* Change Add Show on Add Trending Show page to use the full Add New Show flow
* Fix adding shows with titles that contain "&" on Add Trending Show page
* Fix unset vars on Add New Shows page used in the Add Existing Shows context
* Remove unneeded datetime convert from Coming Episodes page
* Fix the log output of the limited backlog search for episodes missed
* Remove unsupported t411 search provider
* Remove obsolete Animezb search provider
* Add option to treat anime releases that lack a quality tag as HDTV instead of "unknown"
* Remove old version checking code that no longer applies to SickGear's release system
* Fix pnotify notifications going full page
* Change overhaul Config Post Processing to be in line with General Configuration
* Change rearrange Config Post Processing items into sections for easier use
* Fix CSS overriding link colors on config pages
* Change Config Post Processing texts and descriptions throughout
* Fix Config Post Processing info icons in "Naming Legends"
* Change Config Post Processing naming sample lines to be more available
* Add Config Post Processing failed downloads Sabnzbd setup guide
* Fix Config Post Processing "Anime name pattern" custom javascript validation
* Add check that SSLv3 is available before use by requests lib
* Update Requests library 2.3.0 to 2.4.3 (9dc6602)
* Change suppress HTTPS verification InsecureRequestWarning as many sites use self-certified certificates
* Fix API endpoint Episode.SetStatus to "Wanted"
* Change airdateModifyStamp to handle hour that is "00:00"
* Fix a handler when ShowData is not available in TVDB and TVRage APIs
* Fix a handler when EpisodeData is not available in TVDB and TVRage APIs
* Add TVRage "Canceled/Ended" as "Ended" status to sort on Simple Layout of Show List page
* Fix qtips on Display Show and Config Post Processing
* Fix glitch above rating stars on Display Show page
* Change overhaul Config/Search Providers
* Change Config/Search Providers texts and descriptions
* Fix display when no providers are visible on Config/Search Providers
* Fix failing "Search Settings" link that is shown on Config/Search Providers when Torrents Search is not enabled
* Fix failing "Providers" link on Config/Search Settings/Episode Search
* Change case of labels in General Config/Interface/Timezone
* Split enabled from not enabled providers in the Configure Provider drop down on the Providers Options tab
* Fix typo on General Config/Misc
* Fix Add Trending Shows "Not In library" now filters tvrage added shows
* Add a hover over text "In library" on Add Trending Shows to display tv database show was added from
* Fix reduces time API endpoint Shows takes to return results
* Fix Coming Eps Page to include shows +/- 1 day for time zone corrections
* Fix season jumping dropdown menu for shows with over 15 seasons on Display Show
* Fix article sorting for Coming Eps, Manage, Show List, Display Show, API, and Trending Shows pages
### 0.3.1 (2014-11-19 16:40:00 UTC)
* Fix failing travis test
### 0.3.0 (2014-11-12 14:30:00 UTC)
* Change logos, text etc. branding to SickGear
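Note on the "Fix article sorting" entry above: 0.4.0 swaps the naive /^(The|A|An)\s/i strip for a regex that leaves titles such as "A to Z" intact. A minimal Python sketch of the idea (illustrative only, not code from this commit):

import re

def sort_key(title):
    # "(?:A(?!\s+to)n?)" matches a leading "A"/"An" unless followed by " to";
    # "The" matches as-is; the first letter of the next word is kept by \1.
    return re.sub(r'^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', title, flags=re.I).lower()

shows = ['The Blacklist', 'An Idiot Abroad', 'A to Z', 'Archer']
print(sorted(shows, key=sort_key))
# ['A to Z', 'Archer', 'The Blacklist', 'An Idiot Abroad']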


@@ -1,3 +1,5 @@
Libs with customisations...
/tornado
/lib/requests/packages/urllib3/contrib/pyopenssl.py
/lib/requests/packages/urllib3/connectionpool.py
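Both urllib3 entries added above relate to the changelog item "Add check that SSLv3 is available before use by requests lib". A hedged sketch of such a guard (the fallback choice is illustrative, not the commit's exact code):

import ssl

# Some OpenSSL builds omit SSLv3, so the constant may be missing entirely.
if hasattr(ssl, 'PROTOCOL_SSLv3'):
    ssl_version = ssl.PROTOCOL_SSLv3
else:
    ssl_version = ssl.PROTOCOL_SSLv23  # let the handshake negotiate instead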


@@ -1,250 +0,0 @@
#!/usr/bin/env python2
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys

def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
              Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
                 error occurred.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
              Code, None otherwise.
  """
  # The login is the user part of user@gmail.com. If the login provided
  # is in the full user@domain form, strip it down.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]

  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])

  content_type, body = encode_upload_request(form_fields, file)

  upload_host = '%s.googlecode.com' % project_name
  upload_uri = '/files'
  auth_token = base64.b64encode('%s:%s'% (user_name, password))
  headers = {
    'Authorization': 'Basic %s' % auth_token,
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
  }

  server = httplib.HTTPSConnection(upload_host)
  server.request('POST', upload_uri, body, headers)
  resp = server.getresponse()
  server.close()

  if resp.status == 201:
    location = resp.getheader('Location', None)
  else:
    location = None
  return resp.status, resp.reason, location


def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'

  body = []

  # Add the metadata about the upload first
  for key, value in fields:
    body.extend(
      ['--' + BOUNDARY,
       'Content-Disposition: form-data; name="%s"' % key,
       '',
       value,
       ])

  # Now add the file itself
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()

  body.extend(
    ['--' + BOUNDARY,
     'Content-Disposition: form-data; name="filename"; filename="%s"'
     % file_name,
     # The upload server determines the mime-type, no need to set it.
     'Content-Type: application/octet-stream',
     '',
     file_content,
     ])

  # Finalize the form body
  body.extend(['--' + BOUNDARY + '--', ''])

  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)


def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    config_dir: Path to Subversion configuration directory, 'none', or None.
    user_name: Your Google account name.
    tries: How many attempts to make.
  """
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()

    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break

  return status, reason, url


def main():
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')

  options, args = parser.parse_args()

  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')

  file_path = args[0]

  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None

  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1


if __name__ == '__main__':
  sys.exit(main())
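For reference, a minimal sketch of driving the script above programmatically rather than via the CLI; the file name, project, and labels here are hypothetical placeholders:

status, reason, url = upload_find_auth(
    'dist/sickgear-0.4.0.tar.gz',      # hypothetical local file to upload
    'sickgear',                        # hypothetical Google Code project name
    'SickGear 0.4.0 release archive',  # summary shown on the downloads page
    labels=['Type-Archive'])
if url:
    print 'Uploaded to %s' % url
else:
    print 'Upload failed: %s (%s)' % (reason, status)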

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -199,6 +199,10 @@ inc_top.tmpl
    background: #dcdcdc url("../css/lib/images/ui-bg_highlight-soft_75_dcdcdc_1x100.png") 50% top repeat-x;
}
+.ui-widget-content a {
+    text-decoration: none;
+}
.ui-widget-header {
    background: #ffffff url("../css/lib/images/ui-bg_flat_0_ffffff_40x100.png") 50% 50% repeat-x;
}
@@ -967,8 +971,7 @@ displayShow.tmpl
#posterCol {
    float: left;
-    margin-right: 10px;
-    margin-bottom: 20px;
+    margin: 3px 10px 20px 0;
}
#showCol {
@@ -1016,22 +1019,33 @@ h1.title {
span.imdbstars {
    display: inline-block;
-    vertical-align: top;
+    margin: 0 3px 0 0;
+    vertical-align: baseline;
    cursor: help;
-    margin-top: 4px;
}
+span.imdbstars > * {
+    background-position: 0 0 !important;
+    max-width:120px;
+}
span.imdbstars, span.imdbstars > * {
    height: 12px;
    background: url(../images/rating.png) 0 -12px repeat-x;
    width: 120px;
    display: inline-block;
-    vertical-align: top;
+    font-size:10px
}
-span.imdbstars > * {
-    background-position: 0 0;
-    max-width:120px;
-}
+#showinfo .flag {
+    margin: 0 3px 0 0;
+    vertical-align: baseline;
+}
+#showinfo .imdb-info {
+    margin: 0 3px 0 0;
+}
+#showinfo a.service {
+    margin: 0 3px 0 0;
+    font-size: 16px;
+}
ul.tags {
@@ -1512,6 +1526,7 @@ config*.tmpl
.component-group-desc{
    float: left;
    width: 250px;
+    padding-right: 10px;
}
.component-group-desc h3{
@@ -1519,7 +1534,6 @@ config*.tmpl
}
.component-group-desc p {
-    width: 90%;
    margin: 10px 0;
    color: #666;
}
@@ -1530,7 +1544,7 @@ config*.tmpl
#config div.field-pair select,
#config div.field-pair input {
-    margin-right: 6px;
+    margin-right: 15px;
}
#config div.field-pair input {
@@ -1558,7 +1572,7 @@ config*.tmpl
}
#config label.space-right {
-    margin-right:10px
+    margin-right:20px
}
#config .metadataDiv {
    display: none;
@@ -1598,16 +1612,56 @@ select .selected {
    list-style-type: none;
}
+#config.search_providers #core-component-group1 #provider_key h4 {
+    display: inline-block;
+    float: left;
+    margin: 0;
+}
+#config.search_providers #core-component-group1 #provider_key p {
+    margin: 0 0 20px 30px;
+}
+#config.search_providers #core-component-group1 .component-group-desc,
+#config.search_providers #provider_order_list,
+#config.search_providers #core-component-group1 #provider_key {
+    width: 300px
+}
+#config.search_providers #provider_order_list {
+    padding: 0;
+    float: left
+}
+#config.search_providers #provider_order_list,
+#config.search_providers #core-component-group1 .btn {
+    margin: 0 auto
+}
+#config.search_providers #core-component-group1 .btn {
+    display: block
+}
+#config.search_providers #core-component-group1 #provider_key {
+    float: right;
+    margin-bottom:25px
+}
#provider_order_list li,
#service_order_list li {
    padding: 5px;
-    margin: 5px 0;
+    margin: 0 0 5px;
    font-size: 14px;
}
#provider_order_list input,
#service_order_list input {
-    margin: 0px 2px;
+    margin: 0 5px 0 2px;
+    vertical-align: middle;
+}
+#provider_order_list a.imgLink {
+    margin-right: 3px
}
#config .tip_scale label span.component-title {
@@ -1631,38 +1685,50 @@ select .selected {
}
[class^="icon16-"], [class*=" icon16-"] {
-    background-image: url("../images/glyphicons-config-black.png");
-    background-position: -40px 0;
+    background-image: url("../images/glyphicons-config.png");
    background-repeat: no-repeat;
    display: inline-block;
    height: 16px;
-    line-height: 16px;
    vertical-align: text-top;
    width: 16px;
+    margin-top: 1px;
}
.icon16-github {
    background-position: 0 0;
}
.icon16-mirc {
-    background-position: -20px 0;
+    background-position: -26px 0;
}
-.icon16-sb {
-    background-position: -40px 0;
+.icon16-sg {
+    background-position: -52px 0;
}
.icon16-web {
-    background-position: -60px 0;
+    background-position: -78px 0;
}
.icon16-win {
-    background-position: -80px 0;
+    background-position: -104px 0;
}
/* =======================================================================
config_postProcessing.tmpl
========================================================================== */
+#config .episode-sample {
+    width: 240px;
+    margin-right: 10px;
+    border: 1px solid;
+}
+#config .episode-sample h3 {
+    margin: 10px;
+    font-size: 18px;
+    line-height: 24px;
+}
#config div.example {
-    padding: 10px; background-color: #efefef;
+    padding: 10px;
+    border: 1px solid;
}
.Key {
@@ -1710,43 +1776,47 @@ div.metadata_options_wrapper {
}
div.metadata_example_wrapper {
-    float: right;
-    width: 325px;
+    margin-left: 220px;
+}
+div.metadata_options_wrapper h4,
+div.metadata_example_wrapper h4 {
+    margin: 0 0 10px;
}
div.metadata_options {
+    padding: 7px;
    overflow: auto;
    background: #f5f1e4;
    border: 1px solid #ccc;
}
div.metadata_options label:hover {
+    color: #fff;
+    background-color: #57442b;
    cursor: pointer;
}
div.metadata_options label {
+    display: block;
+    padding-left: 7px;
+    line-height: 20px;
    color: #036;
}
-div.metadata_example {
-    padding: 8px;
+div.metadata_example label {
+    cursor: pointer;
+    font-weight: 600;
}
-div.metadata_options label,
div.metadata_example label {
-    display: block;
    line-height: 21px;
-    color: #000;
-    cursor: pointer;
-    margin: 0px;
+    display: block;
+    padding: 3px;
+}
+div.metadata_options input {
+    margin-right: 3px;
+    vertical-align: baseline;
}
div.metadataDiv .disabled {
-    color: #ccc;
+    font-weight: normal;
}
.notifier-icon {
@@ -1754,11 +1824,6 @@ div.metadataDiv .disabled {
    margin: 6px 4px 0px 0px;
}
-.warning {
-    border-color: #F89406;
-    background: url("../images/warning16.png") no-repeat right 5px center #fff;
-}
/* =======================================================================
manage*.tmpl
========================================================================== */
@@ -2153,34 +2218,15 @@ fieldset[disabled] .navbar-default .btn-link:focus {
    font-size: 12px;
    line-height: 16px;
    *line-height: 20px;
-    color: #333333;
    text-align: center;
-    text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
    vertical-align: middle;
    cursor: pointer;
-    background-color: #f5f5f5;
-    *background-color: #e6e6e6;
-    background-image: -ms-linear-gradient(top, #ffffff, #e6e6e6);
-    background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ffffff), to(#e6e6e6));
-    background-image: -webkit-linear-gradient(top, #ffffff, #e6e6e6);
-    background-image: -o-linear-gradient(top, #ffffff, #e6e6e6);
-    background-image: linear-gradient(top, #ffffff, #e6e6e6);
-    background-image: -moz-linear-gradient(top, #ffffff, #e6e6e6);
    background-repeat: repeat-x;
-    border: 1px solid #cccccc;
    *border: 0;
-    border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-    border-color: #e6e6e6 #e6e6e6 #bfbfbf;
-    border-bottom-color: #b3b3b3;
    -webkit-border-radius: 4px;
    -moz-border-radius: 4px;
    border-radius: 4px;
-    filter: progid:dximagetransform.microsoft.gradient(startColorstr='#ffffff', endColorstr='#e6e6e6', GradientType=0);
-    filter: progid:dximagetransform.microsoft.gradient(enabled=false);
    *zoom: 1;
-    -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-    -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
-    box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.2), 0 1px 2px rgba(0, 0, 0, 0.05);
}
.btn:hover,
@@ -3007,16 +3053,37 @@ span.token-input-delete-token {
    margin: 0 1px;
}
+.boldest {font-weight: 900}
.red-text {color:#d33}
.clear-left {clear:left}
+.float-left {float:left}
.nextline-block {display:block}
+#failed-guide,
+#failed-guide .title,
+#failed-guide li {margin:0; padding:0}
+#failed-guide .title {list-style-type: none}
+#failed-guide li {margin-left:15px}
+.icon-info-sign {
+    display: block;
+    width: 16px;
+    height: 16px;
+    margin: 2px 5px;
+    float: left;
+}
+.pp .component-group-list.right,
+.pp .field-pair.right {
+    margin: 0 0 0 250px;
+}
.trakt-image {
    display: block;
    width: 100%;
    height: 100%;
    z-index: 0;
-    background-image: url(/images/poster-dark.jpg)
+    background-image: url(../images/poster-dark.jpg)
}
/* =======================================================================
jquery.confirm.css
@@ -3028,7 +3095,7 @@ jquery.confirm.css
    position: fixed;
    top: 0;
    left: 0;
-    background: url('../images/bg.gif');
+    background: url(../images/bg.gif);
    background: -moz-linear-gradient(rgba(0,0,0,0.5), rgba(0,0,0,0.5)) repeat-x rgba(0,0,0,0.5);
    background:-webkit-gradient(linear, 0% 0%, 0% 100%, from(rgba(0,0,0,0.5)), to(rgba(0,0,0,0.5))) repeat-x rgba(0,0,0,0.5);
    z-index: 100000;
@@ -3056,13 +3123,13 @@ jquery.confirm.css
    color: #fff;
    margin: 0;
    font-size: 22px;
-    text-shadow: 0px 1px 1px rgba(0, 0, 0, 0.75);
+    text-shadow: 0 1px 1px rgba(0, 0, 0, 0.75);
}
#confirmBox p {
    padding-top: 20px;
    color: #000;
-    text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.75);
+    text-shadow: 0 1px 1px rgba(255, 255, 255, 0.75);
}
#confirmButtons {
@@ -3118,10 +3185,10 @@ pnotify.css
========================================================================== */
.ui-pnotify-sticker {
    margin-top: -12px;
}
.ui-pnotify-closer {
    margin-top: -12px;
    margin-right: -10px;
}

Binary file not shown (before: 3.3 KiB, after: 1.4 KiB)

Binary file not shown (before: 1.9 KiB, after: 1.3 KiB)

Binary file not shown (before: 2.9 KiB)

Binary file not shown (before: 8.6 KiB)

Binary file not shown (after: 3.4 KiB)

Binary file not shown (before: 3.8 KiB)

Binary file not shown (before: 3.2 KiB)


@@ -82,7 +82,7 @@
if (0 == s.indexOf('Loading...'))
return s.replace('Loading...', '000')
#if not $sickbeard.SORT_ARTICLE:
-return (s || '').replace(/^(The|A|An)\s/i, '')
+return (s || '').replace(/^(?:(?:A(?!\s+to)n?)|The)\s(\w)/i, '$1')
#else:
return (s || '')
#end if
@@ -359,10 +359,10 @@
#set $too_late_header = True
#elif $cur_ep_enddate >= $today and $cur_ep_airdate < $next_week.date():
#if $cur_ep_airdate == $today.date():
-<br /><h2 class="day">$datetime.date.fromordinal($cur_ep_airdate.toordinal).strftime('%A').decode($sickbeard.SYS_ENCODING).capitalize() <span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
+<br /><h2 class="day">$sbdatetime.sbdatetime.sbfdate($cur_ep_airdate, '%A').decode($sickbeard.SYS_ENCODING).capitalize() <span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
#set $today_header = True
#else:
-<br /><h2 class="day">$datetime.date.fromordinal($cur_ep_airdate.toordinal).strftime('%A').decode($sickbeard.SYS_ENCODING).capitalize()</h2>
+<br /><h2 class="day">$sbdatetime.sbdatetime.sbfdate($cur_ep_airdate, '%A').decode($sickbeard.SYS_ENCODING).capitalize()</h2>
#end if
#end if
#end if
@@ -371,10 +371,11 @@
#if $cur_ep_airdate == $today.date() and not $today_header:
<div class="comingepheader">
-<br /><h2 class="day">$datetime.date.fromordinal($cur_ep_airdate.toordinal).strftime('%A').decode($sickbeard.SYS_ENCODING).capitalize() <span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
+<br /><h2 class="day">$sbdatetime.sbdatetime.sbfdate($cur_ep_airdate, '%A').decode($sickbeard.SYS_ENCODING).capitalize() <span style="font-size: 14px; vertical-align: top;">[Today]</span></h2>
#set $today_header = True
#end if
#if $runtime:
+#set $cur_ep_enddate = $cur_result['localtime'] + datetime.timedelta(minutes = $runtime)
#if $cur_ep_enddate < $today:
#set $show_div = 'ep_listing listing-overdue'
#elif $cur_ep_airdate >= $next_week.date():
@@ -489,7 +490,7 @@
#for $day in $dates
#set $tbl_day += 1
<table class="sickbeardTable tablesorter calendarTable <%= 'cal-%s' % (('even', 'odd')[1 == tbl_day % 2]) %>" cellspacing="0" border="0" cellpadding="0">
-<thead><tr><th>$day.strftime('%A').decode($sickbeard.SYS_ENCODING).capitalize()</th></tr></thead>
+<thead><tr><th>$sbdatetime.sbdatetime.sbfdate($day, '%A').decode($sickbeard.SYS_ENCODING).capitalize()</th></tr></thead>
<tbody>
#set $day_has_show = False
#for $cur_result in $sql_results:
@@ -503,7 +504,7 @@
#if $airday == $day:
#set $day_has_show = True
-#set $airtime = $sbdatetime.sbdatetime.fromtimestamp($time.mktime($cur_result['localtime'].timetuple())).sbftime().decode($sickbeard.SYS_ENCODING)
+#set $airtime = $sbdatetime.sbdatetime.sbftime($cur_result['localtime']).decode($sickbeard.SYS_ENCODING)
#if $sickbeard.TRIM_ZERO:
#set $airtime = re.sub(r'0(\d:\d\d)', r'\1', $airtime, 0, re.IGNORECASE | re.MULTILINE)
#end if
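Aside: the TRIM_ZERO substitution kept as context above is easy to misread; a standalone Python illustration of its effect on formatted air times (not code from this commit):

import re

for airtime in ('08:15 PM', '12:30 AM', 'Sunday 09:00 PM'):
    # strips a leading zero from clock times: "08:15 PM" -> "8:15 PM"
    print(re.sub(r'0(\d:\d\d)', r'\1', airtime, 0, re.IGNORECASE | re.MULTILINE))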


@@ -3,18 +3,18 @@
#from sickbeard.helpers import anon_url
#import os.path
-#set global $title="Configuration"
-#set global $header="Configuration"
-#set global $sbPath=".."
-#set global $topmenu="config"#
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
+#set global $title = 'Configuration'
+#set global $header = 'Configuration'
+#set global $sbPath = '..'
+#set global $topmenu = 'config'
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_top.tmpl')
#if $varExists('header')
-<h1 class="header">$header</h1>
+<h1 class='header'>$header</h1>
#else
-<h1 class="title">$title</h1>
+<h1 class='title'>$title</h1>
#end if
+##set cpu_usage = $psutil.cpu_percent()
@@ -31,24 +31,24 @@
<div id="config-content">
<table class="infoTable" cellspacing="1" border="0" cellpadding="0" width="100%">
<tr><td class="infoTableHeader">Version: </td><td class="infoTableCell">
#if $sickbeard.VERSION_NOTIFY
BRANCH: ($sickbeard.BRANCH) / COMMIT: ($sickbeard.CUR_COMMIT_HASH) <!-- &ndash; build.date //--><br />
#else
-You don't have version checking turned on. Please turn on "Check for Update" in Config > General.<br />
+You don't have version checking turned on, see "Check software updates" in Config > General.<br />
#end if
-<font color="red">You are using BETA software</font>
+<em class="red-text">This is BETA software.</em>
</td></tr>
-<tr><td class="infoTableHeader">SR Config file:</td><td class="infoTableCell">$sickbeard.CONFIG_FILE</td></tr>
-<tr><td class="infoTableHeader">SR Database file:</td><td class="infoTableCell">$db.dbFilename()</td></tr>
-<tr><td class="infoTableHeader">SR Cache Dir:</td><td class="infoTableCell">$sickbeard.CACHE_DIR</td></tr>
-<tr><td class="infoTableHeader">SR Arguments:</td><td class="infoTableCell">$sickbeard.MY_ARGS</td></tr>
-<tr><td class="infoTableHeader">SR Web Root:</td><td class="infoTableCell">$sickbeard.WEB_ROOT</td></tr>
+<tr><td class="infoTableHeader">Config file:</td><td class="infoTableCell">$sickbeard.CONFIG_FILE</td></tr>
+<tr><td class="infoTableHeader">Database file:</td><td class="infoTableCell">$db.dbFilename()</td></tr>
+<tr><td class="infoTableHeader">Cache Dir:</td><td class="infoTableCell">$sickbeard.CACHE_DIR</td></tr>
+<tr><td class="infoTableHeader">Arguments:</td><td class="infoTableCell"><%= (sickbeard.MY_ARGS, 'None used')[0 == len(sickbeard.MY_ARGS)] %></td></tr>
+<tr><td class="infoTableHeader">Web Root:</td><td class="infoTableCell">$sickbeard.WEB_ROOT</td></tr>
<tr><td class="infoTableHeader">Python Version:</td><td class="infoTableCell">$sys.version[:120]</td></tr>
-<tr class="infoTableSeperator"><td class="infoTableHeader"><i class="icon16-sb"></i> Homepage</td><td class="infoTableCell"><a href="<%= anon_url('https://github.com/SickGear/SickGear/wiki') %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;">https://github.com/SickGear/SickGear/wiki</a></td></tr>
+<tr class="infoTableSeperator"><td class="infoTableHeader"><i class="icon16-sg"></i> Homepage</td><td class="infoTableCell"><a href="<%= anon_url('https://github.com/SickGear/SickGear/wiki') %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;">https://github.com/SickGear/SickGear/wiki</a></td></tr>
<tr><td class="infoTableHeader"><i class="icon16-github"></i> Source</td><td class="infoTableCell"><a href="<%= anon_url('https://github.com/SickGear/SickGear/') %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;">https://github.com/SickGear/SickGear/</a></td></tr>
<tr><td class="infoTableHeader"><i class="icon16-mirc"></i> Internet Relay Chat</td><td class="infoTableCell"><a href="irc://irc.freenode.net/#SickGear" rel="noreferrer"><i>#SickGear</i> on <i>irc.freenode.net</i></a></td></tr>
</table>
</div>
-#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
+#include $os.path.join($sickbeard.PROG_DIR,'gui/slick/interfaces/default/inc_bottom.tmpl')


@@ -1,109 +1,128 @@
#import sickbeard
#from sickbeard.helpers import anon_url
-#set global $title="Config - Anime"
-#set global $header="Anime"
-#set global $sbPath="../.."
-#set global $topmenu="config"#
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
+#set global $title = 'Config - Anime'
+#set global $header = 'Anime'
+#set global $sbPath = '../..'
+#set global $topmenu = 'config'
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_top.tmpl')
+<script type="text/javascript" src="$sbRoot/js/configAnime.js?$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/config.js?$sbPID"></script>
+<div id="content960">
#if $varExists('header')
<h1 class="header">$header</h1>
#else
<h1 class="title">$title</h1>
#end if
<div id="config">
<div id="config-content">
<form id="configForm" action="saveAnime" method="post">
<div id="config-components">
<ul>
-<li><a href="#core-component-group1">AnimeDB Settings</a></li>
-<li><a href="#core-component-group2">Look & Feel</a></li>
+<li><a href="#core-component-group1">Anime Settings</a></li>
</ul>
-<div id="core-component-group1" class="tab-pane active component-group">
-<div class="component-group-desc">
+<div id="core-component-group1">
+<div class="component-group">
+<div class="component-group-desc">
+<h3>Misc</h3>
+<p>User interface and general configuration.</p>
+</div>
+<fieldset class="component-group-list">
+<div class="field-pair">
+<label for="split_home">
+<span class="component-title">Split show lists</span>
+<span class="component-desc">
+<input type="checkbox" class="enabler" name="split_home" id="split_home" #if $sickbeard.ANIME_SPLIT_HOME then 'checked="checked"' else ""# />
+<p>separate anime from other shows on the home page</p>
+</span>
+</label>
+</div>
+<div class="field-pair">
+<label for="anime_treat_as_hdtv">
+<span class="component-title">Quality control</span>
+<span class="component-desc">
+<input type="checkbox" class="enabler" name="anime_treat_as_hdtv" id="anime_treat_as_hdtv" #if $sickbeard.ANIME_TREAT_AS_HDTV then 'checked="checked"' else ""# />
+<p>treat anime releases that lack a quality tag as HDTV instead of "unknown".&nbsp;This may improve snatch success</p>
+</span>
+</label>
+</div>
+<input type="submit" class="btn config_submitter" value="Save Changes" />
+</fieldset>
+</div><!-- /component-group //-->
+<div class="component-group">
+<div class="component-group-desc">
<img class="notifier-icon" src="$sbRoot/images/providers/anidb.gif" alt="AniDB" title="AniDB" width="24" height="24" />
<h3><a href="<%= anon_url('http://anidb.info') %>" onclick="window.open(this.href, '_blank'); return false;">AniDB</a></h3>
-<p>AniDB is non-profit database of anime information that is freely open to the public</p>
+<p>Manage anime releases with AniDB.</p>
</div>
<fieldset class="component-group-list">
<div class="field-pair">
-<input type="checkbox" class="enabler" name="use_anidb" id="use_anidb" #if $sickbeard.USE_ANIDB then "checked=\"checked\"" else ""# />
-<label for="use_notifo">
-<span class="component-title">Enable</span>
-<span class="component-desc">Should Sick Beard use data from AniDB?</span>
+<label for="use_anidb">
+<span class="component-title">Enable</span>
+<span class="component-desc">
+<input type="checkbox" class="enabler" name="use_anidb" id="use_anidb" #if $sickbeard.USE_ANIDB then 'checked="checked"' else ""# />
+<p>fetch data for scene exceptions (alternative release names) and available 'release groups' per show</p>
+</span>
</label>
</div>
<div id="content_use_anidb">
<div class="field-pair">
-<label class="nocheck">
-<span class="component-title">AniDB Username</span>
-<input type="text" name="anidb_username" id="anidb_username" value="$sickbeard.ANIDB_USERNAME" class="form-control input-sm input350" />
-</label>
-<label class="nocheck">
-<span class="component-title">&nbsp;</span>
-<span class="component-desc">Username of your AniDB account</span>
+<label for="anidb_username">
+<span class="component-title">AniDB username</span>
+<span class="component-desc">
+<input type="text" name="anidb_username" id="anidb_username" value="$sickbeard.ANIDB_USERNAME" class="form-control input-sm input350" />
+</span>
</label>
</div>
<div class="field-pair">
-<label class="nocheck">
-<span class="component-title">AniDB Password</span>
-<input type="password" name="anidb_password" id="anidb_password" value="$sickbeard.ANIDB_PASSWORD" class="form-control input-sm input350" />
-</label>
-<label class="nocheck">
-<span class="component-title">&nbsp;</span>
-<span class="component-desc">Password of your AniDB account</span>
+<label for="anidb_password">
+<span class="component-title">AniDB password</span>
+<span class="component-desc">
+<input type="password" name="anidb_password" id="anidb_password" value="$sickbeard.ANIDB_PASSWORD" class="form-control input-sm input350" />
+</span>
</label>
</div>
<div class="field-pair">
-<input type="checkbox" name="anidb_use_mylist" id="anidb_use_mylist" #if $sickbeard.ANIDB_USE_MYLIST then "checked=\"checked\"" else ""# />
<label>
-<span class="component-title">AniDB MyList</span>
-<span class="component-desc">Do you want to add the PostProcessed Episodes to the MyList ?</span>
+<span class="component-title">Sync AniDB</span>
+<span class="component-desc">
+<input type="checkbox" name="anidb_use_mylist" id="anidb_use_mylist" #if $sickbeard.ANIDB_USE_MYLIST then 'checked="checked"' else ""# />
+<p>add post-processed anime episodes to your "AniDB Mylist"</p>
+</span>
</label>
</div>
</div>
<input type="submit" class="btn config_submitter" value="Save Changes" />
</fieldset>
</div><!-- /component-group //-->
-<div id="core-component-group2" class="tab-pane component-group">
-<div class="component-group-desc">
-<h3>Look and Feel</h3>
-</div>
-<fieldset class="component-group-list">
-<div class="field-pair">
-<input type="checkbox" class="enabler" name="split_home" id="split_home" #if $sickbeard.ANIME_SPLIT_HOME then "checked=\"checked\"" else ""# />
-<label for="use_notifo">
-<span class="component-title">Split show lists</span>
-<span class="component-desc">Separate anime and normal shows in groups</span>
-</label>
-</div>
-<input type="submit" class="btn config_submitter" value="Save Changes" />
-</fieldset>
-</div><!-- /component-group //-->
+</div><!-- /component-group1 //-->
<br/><input type="submit" class="btn config_submitter" value="Save Changes" /><br/>
</div><!-- /config-components //-->
</form>
</div>
</div>
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_bottom.tmpl")
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_bottom.tmpl')


@@ -68,7 +68,7 @@
<span class="component-title">Update shows on startup</span>
<span class="component-desc">
<input type="checkbox" name="update_shows_on_start" id="update_shows_on_start" #if $sickbeard.UPDATE_SHOWS_ON_START then 'checked="checked"' else ''#/>
-<p>with information such as next air dates, show ended, etc. Disable for a faster startup as show info is sheduled to update in the background anyway</p>
+<p>with information such as next air dates, show ended, etc. Disable for a faster startup as show info is scheduled to update in the background anyway</p>
</span>
</label>
</div>
@@ -282,10 +282,10 @@
<span class="component-title">Timezone:</span>
<span class="component-desc">
<label for="local" class="space-right">
-<input type="radio" name="timezone_display" id="local" value="local" #if "local" == $sickbeard.TIMEZONE_DISPLAY then 'checked="checked"' else ''# />Local
+<input type="radio" name="timezone_display" id="local" value="local" #if "local" == $sickbeard.TIMEZONE_DISPLAY then 'checked="checked"' else ''# />local
</label>
<label for="network">
-<input type="radio" name="timezone_display" id="network" value="network" #if "network" == $sickbeard.TIMEZONE_DISPLAY then 'checked="checked"' else ''# />Network
+<input type="radio" name="timezone_display" id="network" value="network" #if "network" == $sickbeard.TIMEZONE_DISPLAY then 'checked="checked"' else ''# />network
</label>
<div class="clear-left"><p>display dates and times in either your timezone or the shows network timezone</p></div>
</span>

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -38,7 +38,7 @@
<div class="component-group-desc">
<h3>Episode Search</h3>
-<p>How to manage searching with <a href="$sbRoot/config/providers">providers</a>.</p>
+<p>How to manage searching with <a href="$sbRoot/config/providers/">providers</a>.</p>
</div>
<fieldset class="component-group-list">


@@ -82,7 +82,7 @@
<div class="clearfix"></div>
<div id="showtitle" data-showname="$show.name">
-<h1 class="title" id="scene_exception_$show.indexerid">$show.name</h1>
+<h1 class="title" id="scene_exception_$show.indexerid"><span>$show.name</span></h1>
</div>
@@ -163,17 +163,17 @@
#else
#if 'country_codes' in $show.imdb_info:
#for $country in $show.imdb_info['country_codes'].split('|')
-<img src="$sbRoot/images/flags/${$country}.png" width="16" height="11" style="margin-left: 3px; vertical-align:middle;" />
+<img class="flag" src="$sbRoot/images/flags/${$country}.png" width="16" height="11" />
#end for
#end if
#if 'year' in $show.imdb_info:
-<span>($show.imdb_info['year']) - $show.imdb_info['runtimes'] minutes - </span>
+<span class="imdb-info">($show.imdb_info['year']) - $show.imdb_info['runtimes'] minutes</span>
#end if
-<a href="<%= anon_url('http://www.imdb.com/title/', _show.imdbid) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://www.imdb.com/title/$show.imdbid"><img alt="[imdb]" height="16" width="16" src="$sbRoot/images/imdb.png" style="margin-top: -1px; vertical-align:middle;"/></a>
+<a class="service" href="<%= anon_url('http://www.imdb.com/title/', _show.imdbid) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://www.imdb.com/title/$show.imdbid"><img alt="[imdb]" height="16" width="16" src="$sbRoot/images/imdb.png" /></a>
#end if
-<a href="<%= anon_url(sickbeard.indexerApi(_show.indexer).config['show_url'], _show.indexerid) %>" onclick="window.open(this.href, '_blank'); return false;" title="$sickbeard.indexerApi($show.indexer).config["show_url"]$show.indexerid"><img alt="$sickbeard.indexerApi($show.indexer).name" height="16" width="16" src="$sbRoot/images/$sickbeard.indexerApi($show.indexer).config["icon"] "style="margin-top: -1px; vertical-align:middle;"/></a>
+<a class="service" href="<%= anon_url(sickbeard.indexerApi(_show.indexer).config['show_url'], _show.indexerid) %>" onclick="window.open(this.href, '_blank'); return false;" title="$sickbeard.indexerApi($show.indexer).config['show_url']$show.indexerid"><img alt="$sickbeard.indexerApi($show.indexer).name" height="16" width="16" src="$sbRoot/images/$sickbeard.indexerApi($show.indexer).config['icon']" /></a>
#if $xem_numbering or $xem_absolute_numbering:
-<a href="<%= anon_url('http://thexem.de/search?q=', _show.name) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://thexem.de/search?q-$show.name"><img alt="[xem]" height="16" width="16" src="$sbRoot/images/xem.png" style="margin-top: -1px; vertical-align:middle;"/></a>
+<a class="service" href="<%= anon_url('http://thexem.de/search?q=', _show.name) %>" rel="noreferrer" onclick="window.open(this.href, '_blank'); return false;" title="http://thexem.de/search?q-$show.name"><img alt="[xem]" height="16" width="16" src="$sbRoot/images/xem.png" /></a>
#end if
</div>
@@ -349,7 +349,7 @@
#end if
#if int($epResult["season"]) != $curSeason:
-<tr>
+<tr id="season-$epResult['season']">
<th class="row-seasonheader" colspan="13" style="width: auto;"><h3><a name="season-$epResult["season"]"></a>#if int($epResult["season"]) == 0 then "Specials" else "Season " + str($epResult["season"])#</h3></th>
</tr>


@@ -58,7 +58,7 @@
return s.replace('Loading...','000');
else
#if not $sickbeard.SORT_ARTICLE:
-return (s || '').replace(/^(The|A|An)\s/i,'');
+return (s || '').replace(/^(?:(?:A(?!\s+to)n?)|The)\s(\w)/i, '$1');
#else:
return (s || '');
#end if
@@ -162,7 +162,7 @@
name: function( itemElem ) {
var name = \$( itemElem ).attr('data-name');
#if not $sickbeard.SORT_ARTICLE:
-return (name || '').replace(/^(The|A|An)\s/i,'');
+return (name || '').replace(/^(?:(?:A(?!\s+to)n?)|The)\s(\w)/i, '$1');
#else:
return (name || '');
#end if
@@ -280,10 +280,13 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
#set $cur_downloaded = 0
#set $cur_total = 0
#set $download_stat_tip = ''
-#if None is not $curShow.status and re.search(r'(?i)(?:new|returning)\s*series', $curShow.status)
-#set $display_status = 'Continuing'
-#else
-#set $display_status = $curShow.status
+#set $display_status = $curShow.status
+#if None is not $display_status
+#if re.search(r'(?i)(?:new|returning)\s*series', $curShow.status)
+#set $display_status = 'Continuing'
+#else if re.search(r'(?i)(?:nded)', $curShow.status)
+#set $display_status = 'Ended'
+#end if
#end if
#if $curShow.indexerid in $show_stat:
@@ -535,7 +538,7 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
#if $layout == 'small':
<td class="tvShow">
<div class="imgsmallposter $layout">
-<a href="$sbRoot/showPoster/?show=$curShow.indexerid&amp;which=$layout" rel="dialog" title="$curShow.name">
+<a href="$sbRoot/showPoster/?show=$curShow.indexerid&amp;which=poster" rel="dialog" title="$curShow.name">
<img src="$sbRoot/showPoster/?show=$curShow.indexerid&amp;which=poster_thumb" class="$layout" alt="$curShow.indexerid"/>
</a>
<a href="$sbRoot/home/displayShow?show=$curShow.indexerid" style="vertical-align: middle;">$curShow.name</a>
@@ -604,11 +607,16 @@ $myShowList.sort(lambda x, y: cmp(x.name, y.name))
</td>
<td align="center">
-#if None is not $curShow.status and re.search(r'(?i)(?:new|returning)\s*series', $curShow.status)
-Continuing
-#else:
-$curShow.status
+#set $display_status = $curShow.status
+#if None is not $display_status
+#if re.search(r'(?i)(?:new|returning)\s*series', $curShow.status)
+#set $display_status = 'Continuing'
+#else if re.search(r'(?i)(?:nded)', $curShow.status)
+#set $display_status = 'Ended'
+#end if
#end if
+$display_status
</td>
</tr>
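Aside: the new Cheetah status logic above normalises indexer status strings for the Show List; a rough Python equivalent (an illustration of the template logic, not code shipped in this commit):

import re

def display_status(status):
    # map raw indexer strings onto the two values the Show List sorts on
    if status is not None:
        if re.search(r'(?i)(?:new|returning)\s*series', status):
            return 'Continuing'
        if re.search(r'(?i)(?:nded)', status):  # "Ended", "Canceled/Ended"
            return 'Ended'
    return status

print(display_status('Returning Series'))  # Continuing
print(display_status('Canceled/Ended'))    # Ended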


@@ -2,27 +2,27 @@
#import sickbeard
#from sickbeard.helpers import anon_url
-#set global $header="New Show"
-#set global $title="New Show"
-#set global $sbPath="../.."
-#set global $statpath="../.."#
-#set global $topmenu="home"#
+#set global $header = 'New Show'
+#set global $title = 'New Show'
+#set global $sbPath = '../..'
+#set global $statpath = '../..'
+#set global $topmenu = 'home'
#import os.path
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_top.tmpl")
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_top.tmpl')
<script type="text/javascript" src="$sbRoot/js/formwizard.js?$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/qualityChooser.js?$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/newShow.js?$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/addShowOptions.js?$sbPID"></script>
#if $varExists('header')
<h1 class="header">$header</h1>
#else
<h1 class="title">$title</h1>
#end if
<div id="newShowPortal">
@@ -31,20 +31,21 @@
<form id="addShowForm" method="post" action="$sbRoot/home/addShows/addNewShow" accept-charset="utf-8" style="width: 800px;">
<fieldset class="sectionwrap">
<legend class="legendStep">Find a show on the TVDB or TVRAGE</legend>
<div class="stepDiv">
<input type="hidden" id="indexer_timeout" value="$sickbeard.INDEXER_TIMEOUT" />
-#if $use_provided_info:
-Show retrieved from existing metadata: <a href="<%= anon_url(sickbeard.indexerApi(provided_indexer).config['show_url'], provided_indexer_id) %>">$provided_indexer_name</a>
+#if $use_provided_info
+#set $provided_indexer_local = $provided_indexer
+#set $provided_indexer_id_local = $provided_indexer_id
+Show retrieved from existing metadata: <a href="<%= anon_url(sickbeard.indexerApi(provided_indexer_local).config['show_url'], provided_indexer_id_local) %>">$provided_indexer_name</a>
<input type="hidden" name="indexerLang" value="en" />
<input type="hidden" name="whichSeries" value="$provided_indexer_id" />
<input type="hidden" id="providedName" value="$provided_indexer_name" />
<input type="hidden" id="providedIndexer" value="$provided_indexer" />
-#else:
+#else
<input type="text" id="nameToSearch" value="$default_show_name" class="form-control form-control-inline input-sm input350" />
&nbsp;
<select name="indexerLang" id="indexerLangSelect" class="form-control form-control-inline input-sm">
@@ -53,9 +54,9 @@
&nbsp;
<select name="providedIndexer" id="providedIndexer" class="form-control form-control-inline input-sm">
<option value="0" #if $provided_indexer == 0 then "selected=\"selected\"" else ""#>All Indexers</option>
#for $indexer in $indexers
<option value="$indexer" #if $provided_indexer == $indexer then "selected=\"selected\"" else ""#>$indexers[$indexer]</option>
#end for
</select>
&nbsp;
<input class="btn btn-inline" type="button" id="searchName" value="Search" />
@@ -65,48 +66,48 @@
This <b>DOES NOT</b> allow SickGear to download non-english TV episodes!<br />
<br />
<div id="searchResults" style="height: 100%;"><br/></div>
#end if
</div>
</fieldset>
<fieldset class="sectionwrap">
<legend class="legendStep">Pick the parent folder</legend>
<div class="stepDiv">
-#if $provided_show_dir:
+#if $provided_show_dir
Pre-chosen Destination Folder: <b>$provided_show_dir</b> <br />
<input type="hidden" id="fullShowPath" name="fullShowPath" value="$provided_show_dir" /><br />
#else
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_rootDirs.tmpl")
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_rootDirs.tmpl')
#end if
</div>
</fieldset>
<fieldset class="sectionwrap">
<legend class="legendStep">Customize options</legend>
<div class="stepDiv">
-#include $os.path.join($sickbeard.PROG_DIR, "gui/slick/interfaces/default/inc_addShowOptions.tmpl")
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_addShowOptions.tmpl')
</div>
</fieldset>
-#for $curNextDir in $other_shows:
+#for $curNextDir in $other_shows
<input type="hidden" name="other_shows" value="$curNextDir" />
#end for
<input type="hidden" name="skipShow" id="skipShow" value="" />
</form>
<br />
<div style="width: 100%; text-align: center;">
<input class="btn" type="button" id="addShowButton" value="Add Show" disabled="disabled" />
-#if $provided_show_dir:
+#if $provided_show_dir
<input class="btn" type="button" id="skipShowButton" value="Skip Show" />
#end if
</div>
<script type="text/javascript" src="$sbRoot/js/rootDirs.js?$sbPID"></script>
</div></div>
-#include $os.path.join($sickbeard.PROG_DIR,"gui/slick/interfaces/default/inc_bottom.tmpl")
+#include $os.path.join($sickbeard.PROG_DIR, 'gui/slick/interfaces/default/inc_bottom.tmpl')
@@ -1,6 +1,7 @@
#import sickbeard #import sickbeard
#import datetime #import datetime
#import re #import re
#import urllib
#from sickbeard.common import * #from sickbeard.common import *
#from sickbeard import sbdatetime #from sickbeard import sbdatetime
#from sickbeard.helpers import anon_url #from sickbeard.helpers import anon_url
@@ -23,6 +24,7 @@
// initialise combos for dirty page refreshes // initialise combos for dirty page refreshes
\$('#showsort').val('original'); \$('#showsort').val('original');
\$('#showsortdirection').val('asc'); \$('#showsortdirection').val('asc');
\$('#showfilter').val('*');
var \$container = [\$('#container')]; var \$container = [\$('#container')];
jQuery.each(\$container, function (j) { jQuery.each(\$container, function (j) {
@@ -34,7 +36,7 @@
name: function( itemElem ) { name: function( itemElem ) {
var name = \$( itemElem ).attr('data-name') || ''; var name = \$( itemElem ).attr('data-name') || '';
#if not $sickbeard.SORT_ARTICLE: #if not $sickbeard.SORT_ARTICLE:
name = name.replace(/^(The|A|An)\s/i, ''); name = name.replace(/^(?:(?:A(?!\s+to)n?)|The)\s(\w)/i, '$1');
#end if #end if
return name.toLowerCase(); return name.toLowerCase();
}, },
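The new expression changes the sort key so a leading article is dropped unless it begins a title like "A to Z". A rough Python port of the JavaScript replace, for illustration only (names here are assumptions, not template code):

    import re

    # assumed equivalent of the template's sort key: strip a leading
    # "A"/"An"/"The" except when the title starts "A to ..."
    def sort_key(name):
        return re.sub(r'^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', name, flags=re.I).lower()

    assert sort_key('The Blacklist') == 'blacklist'
    assert sort_key('An Idiot Abroad') == 'idiot abroad'
    assert sort_key('A to Z') == 'a to z'  # kept intact by the (?!\s+to) lookahead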
@@ -73,6 +75,11 @@
\$('#showsortdirection').on( 'change', function() { \$('#showsortdirection').on( 'change', function() {
\$('#container').isotope({sortAscending: ('asc' == this.value)}); \$('#container').isotope({sortAscending: ('asc' == this.value)});
}); });
\$('#showfilter').on( 'change', function() {
var filterValue = this.value;
\$('#container').isotope({ filter: filterValue });
});
}); });
//--> //-->
@@ -86,7 +93,16 @@
#if $trending_shows #if $trending_shows
<div class="pull-right" style="margin-top: -40px;"> <div class="pull-right" style="margin-top: -40px;">
<span>Sort By:</span> <span>Show:</span>
<select id="showfilter" class="form-control form-control-inline input-sm">
#set $count_trending = len($trending_shows)
#set $count_inlibrary = $trending_inlibrary
<option value="*" selected="selected">All<%= ' (%d)' % count_trending %></option>
<option value=".notinlibrary">Not In Library<%= ' (%d)' % (count_trending - count_inlibrary) %></option>
<option value=".inlibrary">In Library<%= ' (%d)' % count_inlibrary %></option>
</select>
<span style="margin-left:12px">Sort By:</span>
<select id="showsort" class="form-control form-control-inline input-sm"> <select id="showsort" class="form-control form-control-inline input-sm">
<option value="name">Name</option> <option value="name">Name</option>
<option value="original" selected="selected">Original</option> <option value="original" selected="selected">Original</option>
@@ -115,7 +131,7 @@
#set $image = re.sub(r'(.*)(\..*?)$', r'\1-300\2', $cur_show['images']['poster'], 0, re.IGNORECASE | re.MULTILINE) #set $image = re.sub(r'(.*)(\..*?)$', r'\1-300\2', $cur_show['images']['poster'], 0, re.IGNORECASE | re.MULTILINE)
<div class="trakt_show" data-name="$cur_show['title']" data-rating="$cur_show['ratings']['percentage']" data-votes="$cur_show['ratings']['votes']"> <div class="trakt_show <%= ('notinlibrary', 'inlibrary')[':' in cur_show['show_id']] %>" data-name="$cur_show['title']" data-rating="$cur_show['ratings']['percentage']" data-votes="$cur_show['ratings']['votes']">
<div class="traktContainer"> <div class="traktContainer">
<div class="trakt-image"> <div class="trakt-image">
<a class="trakt-image" href="<%= anon_url(cur_show['url']) %>" target="_blank"><img alt="" class="trakt-image" src="${image}" /></a> <a class="trakt-image" href="<%= anon_url(cur_show['url']) %>" target="_blank"><img alt="" class="trakt-image" src="${image}" /></a>
@@ -130,10 +146,11 @@
<i>$cur_show['ratings']['votes'] votes</i> <i>$cur_show['ratings']['votes'] votes</i>
<div class="traktShowTitleIcons"> <div class="traktShowTitleIcons">
#if 'ExistsInLibrary' in $cur_show['tvdb_id']: #if ':' in $cur_show['show_id']:
<p style="line-height: 1.5; padding: 2px 5px 3px">In library</p> <p style="line-height: 1.5; padding: 2px 5px 3px" title="<%= '%s added' % ('TVRage', 'theTVDB')['1' == cur_show['show_id'][:1]] %>">In library</p>
#else #else
<a href="$sbRoot/home/addTraktShow?indexer_id=${cur_show['tvdb_id']}&amp;showName=${cur_show['title']}" class="btn btn-xs">Add Show</a> #set $encoded_show_title = urllib.quote($cur_show['title'].encode("utf-8"))
<a href="$sbRoot/home/addTraktShow?indexer_id=${cur_show['show_id']}&amp;showName=${encoded_show_title}" class="btn btn-xs">Add Show</a>
#end if #end if
</div> </div>
</div> </div>
@@ -3,52 +3,49 @@
#from sickbeard import db, sbdatetime #from sickbeard import db, sbdatetime
#from sickbeard.common import * #from sickbeard.common import *
</div> <!-- /content --> </div> <!-- /content -->
</div> <!-- /contentWrapper --> </div> <!-- /contentWrapper -->
<footer> <footer>
<div class="footer clearfix"> <div class="footer clearfix">
#set $myDB = $db.DBConnection() #set $my_db = $db.DBConnection()
#set $today = str($datetime.date.today().toordinal()) #set $today = str($datetime.date.today().toordinal())
#set status_quality = '(' + ','.join([str(quality) for quality in $Quality.SNATCHED + $Quality.SNATCHED_PROPER]) + ')' #set status_quality = '(' + ','.join([str(quality) for quality in $Quality.SNATCHED + $Quality.SNATCHED_PROPER]) + ')'
#set status_download = '(' + ','.join([str(quality) for quality in $Quality.DOWNLOADED + [$ARCHIVED]]) + ')' #set status_download = '(' + ','.join([str(quality) for quality in $Quality.DOWNLOADED + [$ARCHIVED]]) + ')'
#set $sql_statement = 'SELECT ' #set $sql_statement = 'SELECT '
#set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN ' + $status_quality + ') AS ep_snatched, ' #set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN ' + $status_quality + ') AS ep_snatched, '
#set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN ' + $status_download + ') AS ep_downloaded, ' #set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 AND status IN ' + $status_download + ') AS ep_downloaded, '
#set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 ' #set $sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND airdate > 1 '
#set $sql_statement += ' AND ((airdate <= ' + $today + ' AND (status = ' + str($SKIPPED) + ' OR status = ' + str($WANTED) + ')) ' #set $sql_statement += ' AND ((airdate <= ' + $today + ' AND (status = ' + str($SKIPPED) + ' OR status = ' + str($WANTED) + ')) '
#set $sql_statement += ' OR (status IN ' + status_quality + ') OR (status IN ' + status_download + '))) AS ep_total ' #set $sql_statement += ' OR (status IN ' + status_quality + ') OR (status IN ' + status_download + '))) AS ep_total '
#set $sql_statement += ' FROM tv_episodes tv_eps LIMIT 1' #set $sql_statement += ' FROM tv_episodes tv_eps LIMIT 1'
#set $sql_result = $myDB.select($sql_statement) #set $sql_result = $my_db.select($sql_statement)
#set $shows_total = len($sickbeard.showList) #set $shows_total = len($sickbeard.showList)
#set $shows_active = len([show for show in $sickbeard.showList if show.paused == 0 and show.status != "Ended"]) #set $shows_active = len([show for show in $sickbeard.showList if show.paused == 0 and show.status != "Ended"])
#if $sql_result: #if $sql_result:
#set $ep_snatched = $sql_result[0]['ep_snatched'] #set $ep_snatched = $sql_result[0]['ep_snatched']
#set $ep_downloaded = $sql_result[0]['ep_downloaded'] #set $ep_downloaded = $sql_result[0]['ep_downloaded']
#set $ep_total = $sql_result[0]['ep_total'] #set $ep_total = $sql_result[0]['ep_total']
#else #else
#set $ep_snatched = 0 #set $ep_snatched = 0
#set $ep_downloaded = 0 #set $ep_downloaded = 0
#set $ep_total = 0 #set $ep_total = 0
#end if #end if
<div class="footerhighlight">$shows_total</div> Shows (<div class="footerhighlight">$shows_active</div> Active) | <div class="footerhighlight"><%=ep_downloaded%></div>#if $ep_snatched > 0 then " (" + "<div class=\"footerhighlight\">+" + str($ep_snatched) + "</div> snatched)" else ""# / <div class="footerhighlight">$ep_total</div> Episodes Downloaded | Daily Search: <div class="footerhighlight"><%=str(sickbeard.dailySearchScheduler.timeLeft()).split('.')[0]%></div> | Backlog Search: <div class="footerhighlight">$sbdatetime.sbdatetime.sbfdate($sickbeard.backlogSearchScheduler.nextRun())</div> <span class="footerhighlight">$shows_total</span> shows (<span class="footerhighlight">$shows_active</span> active)
| <span class="footerhighlight"><%= ep_downloaded %></span><%= ('', ' (<span class="footerhighlight">+%s</span> snatched)' % str(ep_snatched))[ep_snatched > 0] %>&nbsp;/&nbsp;<span class="footerhighlight">$ep_total</span> episodes downloaded
| daily search: <span class="footerhighlight"><%= str(sickbeard.dailySearchScheduler.timeLeft()).split('.')[0] %></span>
| backlog search: <span class="footerhighlight">$sbdatetime.sbdatetime.sbfdate($sickbeard.backlogSearchScheduler.nextRun())</span>
</div> </div>
<!--
<ul style="display: table; margin: 0 auto; font-size: 12px; list-style-type: none; padding: 0; padding-top: 10px;">
<li><a href="$sbRoot/manage/manageSearches/forceVersionCheck"><img src="$sbRoot/images/menu/update16.png" alt="" width="16" height="16" style="vertical-align:middle;" />Force Version Check</a></li>
<li><a href="$sbRoot/home/restart/?pid=$sbPID" class="confirm"><img src="$sbRoot/images/menu/restart16.png" alt="" width="16" height="16" style="vertical-align:middle;" />Restart</a></li>
<li><a href="$sbRoot/home/shutdown/?pid=$sbPID" class="confirm"><img src="$sbRoot/images/menu/shutdown16.png" alt="" width="16" height="16" style="vertical-align:middle;" />Shutdown</a></li>
</ul>
-->
</footer> </footer>
</body> </body>
@@ -42,17 +42,6 @@
<link rel="stylesheet" type="text/css" href="$sbRoot/css/style.css?$sbPID"/> <link rel="stylesheet" type="text/css" href="$sbRoot/css/style.css?$sbPID"/>
<link rel="stylesheet" type="text/css" href="$sbRoot/css/${sickbeard.THEME_NAME}.css?$sbPID" /> <link rel="stylesheet" type="text/css" href="$sbRoot/css/${sickbeard.THEME_NAME}.css?$sbPID" />
<style type="text/css">
<!--
#if $sickbeard.NEWEST_VERSION_STRING:
.ui-pnotify { top: 30px !important; }
#end if
//-->
</style>
<script type="text/javascript" src="$sbRoot/js/lib/jquery-1.8.3.min.js?$sbPID"></script> <script type="text/javascript" src="$sbRoot/js/lib/jquery-1.8.3.min.js?$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/lib/bootstrap.min.js?$sbPID"></script> <script type="text/javascript" src="$sbRoot/js/lib/bootstrap.min.js?$sbPID"></script>
<script type="text/javascript" src="$sbRoot/js/lib/bootstrap-hover-dropdown.min.js?$sbPID"></script> <script type="text/javascript" src="$sbRoot/js/lib/bootstrap-hover-dropdown.min.js?$sbPID"></script>
@@ -18,7 +18,7 @@
}, },
format: function(s) { format: function(s) {
#if not $sickbeard.SORT_ARTICLE: #if not $sickbeard.SORT_ARTICLE:
return (s || '').replace(/^(The|A|An)\s/i,''); return (s || '').replace(/^(?:(?:A(?!\s+to)n?)|The)\s(\w)/i, '$1');
#else: #else:
return (s || ''); return (s || '');
#end if #end if
@@ -1,27 +1,27 @@
$(document).ready(function(){ $(document).ready(function(){
$(".enabler").each(function(){ $('.enabler').each(function(){
if (!$(this).prop('checked')) if (!$(this).prop('checked'))
$('#content_'+$(this).attr('id')).hide(); $('#content_' + $(this).attr('id')).hide();
}); });
$(".enabler").click(function() { $('.enabler').click(function(){
if ($(this).prop('checked')) if ($(this).prop('checked'))
$('#content_'+$(this).attr('id')).fadeIn("fast", "linear"); $('#content_' + $(this).attr('id')).fadeIn('fast', 'linear');
else else
$('#content_'+$(this).attr('id')).fadeOut("fast", "linear"); $('#content_' + $(this).attr('id')).fadeOut('fast', 'linear');
}); });
$(".viewIf").click(function() { $('.viewIf').click(function(){
if ($(this).prop('checked')) { if ($(this).prop('checked')) {
$('.hide_if_'+$(this).attr('id')).css('display','none'); $('.hide_if_' + $(this).attr('id')).css('display','none');
$('.show_if_'+$(this).attr('id')).fadeIn("fast", "linear"); $('.show_if_' + $(this).attr('id')).fadeIn('fast', 'linear');
} else { } else {
$('.show_if_'+$(this).attr('id')).css('display','none'); $('.show_if_' + $(this).attr('id')).css('display','none');
$('.hide_if_'+$(this).attr('id')).fadeIn("fast", "linear"); $('.hide_if_' + $(this).attr('id')).fadeIn('fast', 'linear');
} }
}); });
$(".datePresets").click(function() { $('.datePresets').click(function(){
var def = $('#date_presets').val() var def = $('#date_presets').val()
if ($(this).prop('checked') && '%x' == def) { if ($(this).prop('checked') && '%x' == def) {
def = '%a, %b %d, %Y' def = '%a, %b %d, %Y'
@@ -46,7 +46,7 @@ $(document).ready(function(){
$('#configForm').ajaxForm({ $('#configForm').ajaxForm({
beforeSubmit: function(){ beforeSubmit: function(){
$('.config_submitter').each(function(){ $('.config_submitter').each(function(){
$(this).attr("disabled", "disabled"); $(this).attr('disabled', 'disabled');
$(this).after('<span><img src="' + sbRoot + '/images/loading16' + themeSpinner + '.gif"> Saving...</span>'); $(this).after('<span><img src="' + sbRoot + '/images/loading16' + themeSpinner + '.gif"> Saving...</span>');
$(this).hide(); $(this).hide();
}); });
@@ -68,8 +68,8 @@ $(document).ready(function(){
}); });
}); });
$('#branchCheckout').click(function () { $('#branchCheckout').click(function(){
url = sbRoot+'/home/branchCheckout?branch='+$("#branchVersion").val(); url = sbRoot + '/home/branchCheckout?branch=' + $('#branchVersion').val();
window.location.href = url; window.location.href = url;
}); });
@@ -77,7 +77,7 @@ $(document).ready(function(){
function config_success(){ function config_success(){
$('.config_submitter').each(function(){ $('.config_submitter').each(function(){
$(this).removeAttr("disabled"); $(this).removeAttr('disabled');
$(this).next().remove(); $(this).next().remove();
$(this).show(); $(this).show();
}); });
@@ -230,26 +230,26 @@ $(document).ready(function () {
$.get(sbRoot + '/config/postProcessing/isNamingValid', {pattern: pattern, multi: multi, anime_type: anime_type}, $.get(sbRoot + '/config/postProcessing/isNamingValid', {pattern: pattern, multi: multi, anime_type: anime_type},
function (data) { function (data) {
if (data == "invalid") { if (data == "invalid") {
$('#naming_pattern').qtip('option', { $('#naming_anime_pattern').qtip('option', {
'content.text': 'This pattern is invalid.', 'content.text': 'This pattern is invalid.',
'style.classes': 'qtip-rounded qtip-shadow qtip-red' 'style.classes': 'qtip-rounded qtip-shadow qtip-red'
}); });
$('#naming_pattern').qtip('toggle', true); $('#naming_anime_pattern').qtip('toggle', true);
$('#naming_pattern').css('background-color', '#FFDDDD'); $('#naming_anime_pattern').css('background-color', '#FFDDDD');
} else if (data == "seasonfolders") { } else if (data == "seasonfolders") {
$('#naming_pattern').qtip('option', { $('#naming_anime_pattern').qtip('option', {
'content.text': 'This pattern would be invalid without the folders, using it will force "Flatten" off for all shows.', 'content.text': 'This pattern would be invalid without the folders, using it will force "Flatten" off for all shows.',
'style.classes': 'qtip-rounded qtip-shadow qtip-red' 'style.classes': 'qtip-rounded qtip-shadow qtip-red'
}); });
$('#naming_pattern').qtip('toggle', true); $('#naming_anime_pattern').qtip('toggle', true);
$('#naming_pattern').css('background-color', '#FFFFDD'); $('#naming_anime_pattern').css('background-color', '#FFFFDD');
} else { } else {
$('#naming_pattern').qtip('option', { $('#naming_anime_pattern').qtip('option', {
'content.text': 'This pattern is valid.', 'content.text': 'This pattern is valid.',
'style.classes': 'qtip-rounded qtip-shadow qtip-green' 'style.classes': 'qtip-rounded qtip-shadow qtip-green'
}); });
$('#naming_pattern').qtip('toggle', false); $('#naming_anime_pattern').qtip('toggle', false);
$('#naming_pattern').css('background-color', '#FFFFFF'); $('#naming_anime_pattern').css('background-color', '#FFFFFF');
} }
}); });
} }
@@ -21,23 +21,23 @@ $(document).ready(function(){
*/ */
$.fn.getCategories = function (isDefault, selectedProvider) { $.fn.getCategories = function (isDefault, selectedProvider) {
var name = selectedProvider[0]; var name = selectedProvider[0];
var url = selectedProvider[1]; var url = selectedProvider[1];
var key = selectedProvider[2]; var key = selectedProvider[2];
if (!name) if (!name)
return; return;
if (!url) if (!url)
return; return;
if (!key) if (!key)
return; return;
var params = {url: url, name: name, key: key}; var params = {url: url, name: name, key: key};
var returnData; var returnData;
$.getJSON(sbRoot + '/config/providers/getNewznabCategories', params, $.getJSON(sbRoot + '/config/providers/getNewznabCategories', params,
function(data){ function(data){
updateNewznabCaps( data, selectedProvider ); updateNewznabCaps( data, selectedProvider );
console.debug(data.tv_categories); console.debug(data.tv_categories);
@@ -46,12 +46,12 @@ $(document).ready(function(){
$.fn.addProvider = function (id, name, url, key, cat, isDefault, showProvider) { $.fn.addProvider = function (id, name, url, key, cat, isDefault, showProvider) {
url = $.trim(url); url = $.trim(url);
if (!url) if (!url)
return; return;
if (!/^https?:\/\//i.test(url)) if (!/^https?:\/\//i.test(url))
url = "http://" + url; url = 'http://' + url;
if (url.match('/$') == null) if (url.match('/$') == null)
url = url + '/'; url = url + '/';
@@ -64,12 +64,12 @@ $(document).ready(function(){
$(this).populateNewznabSection(); $(this).populateNewznabSection();
} }
if ($('#provider_order_list > #'+id).length == 0 && showProvider != false) { if ($('#provider_order_list > #' + id).length == 0 && showProvider != false) {
var toAdd = '<li class="ui-state-default" id="' + id + '"> <input type="checkbox" id="enable_' + id + '" class="provider_enabler" CHECKED> <a href="' + anonURL + url + '" class="imgLink" target="_new"><img src="' + sbRoot var toAdd = '<li class="ui-state-default" id="' + id + '"> <input type="checkbox" id="enable_' + id + '" class="provider_enabler" CHECKED> <a href="' + anonURL + url + '" class="imgLink" target="_new"><img src="' + sbRoot
+ '/images/providers/newznab.png" alt="' + name + '" width="16" height="16"></a> ' + name + '</li>' + '/images/providers/newznab.png" alt="' + name + '" width="16" height="16"></a> ' + name + '</li>'
$('#provider_order_list').append(toAdd); $('#provider_order_list').append(toAdd);
$('#provider_order_list').sortable("refresh"); $('#provider_order_list').sortable('refresh');
} }
$(this).makeNewznabProviderString(); $(this).makeNewznabProviderString();
@@ -84,12 +84,12 @@ $(document).ready(function(){
$('#editATorrentRssProvider').addOption(id, name); $('#editATorrentRssProvider').addOption(id, name);
$(this).populateTorrentRssSection(); $(this).populateTorrentRssSection();
if ($('#provider_order_list > #'+id).length == 0) { if ($('#provider_order_list > #' + id).length == 0) {
var toAdd = '<li class="ui-state-default" id="' + id + '"> <input type="checkbox" id="enable_' + id + '" class="provider_enabler" CHECKED> <a href="' + anonURL + url + '" class="imgLink" target="_new"><img src="' + sbRoot var toAdd = '<li class="ui-state-default" id="' + id + '"> <input type="checkbox" id="enable_' + id + '" class="provider_enabler" CHECKED> <a href="' + anonURL + url + '" class="imgLink" target="_new"><img src="' + sbRoot
+ '/images/providers/torrentrss.png" alt="' + name + '" width="16" height="16"></a> ' + name + '</li>' + '/images/providers/torrentrss.png" alt="' + name + '" width="16" height="16"></a> ' + name + '</li>'
$('#provider_order_list').append(toAdd); $('#provider_order_list').append(toAdd);
$('#provider_order_list').sortable("refresh"); $('#provider_order_list').sortable('refresh');
} }
$(this).makeTorrentRssProviderString(); $(this).makeTorrentRssProviderString();
@@ -113,7 +113,7 @@ $(document).ready(function(){
$('#editANewznabProvider').removeOption(id); $('#editANewznabProvider').removeOption(id);
delete newznabProviders[id]; delete newznabProviders[id];
$(this).populateNewznabSection(); $(this).populateNewznabSection();
$('li').remove('#'+id); $('li').remove('#' + id);
$(this).makeNewznabProviderString(); $(this).makeNewznabProviderString();
} }
@@ -129,7 +129,7 @@ $(document).ready(function(){
$('#editATorrentRssProvider').removeOption(id); $('#editATorrentRssProvider').removeOption(id);
delete torrentRssProviders[id]; delete torrentRssProviders[id];
$(this).populateTorrentRssSection(); $(this).populateTorrentRssSection();
$('li').remove('#'+id); $('li').remove('#' + id);
$(this).makeTorrentRssProviderString(); $(this).makeTorrentRssProviderString();
} }
@@ -145,14 +145,14 @@ $(document).ready(function(){
$('#newznab_cat').attr('disabled','disabled'); $('#newznab_cat').attr('disabled','disabled');
$('#newznab_cap').attr('disabled','disabled'); $('#newznab_cap').attr('disabled','disabled');
$("#newznab_cat option").each(function() { $('#newznab_cat option').each(function() {
$(this).remove(); $(this).remove();
return; return;
}); });
$("#newznab_cap option").each(function() { $('#newznab_cap option').each(function() {
$(this).remove(); $(this).remove();
return; return;
}); });
} else { } else {
@@ -160,8 +160,8 @@ $(document).ready(function(){
var isDefault = newznabProviders[selectedProvider][0]; var isDefault = newznabProviders[selectedProvider][0];
$('#newznab_add_div').hide(); $('#newznab_add_div').hide();
$('#newznab_update_div').show(); $('#newznab_update_div').show();
$('#newznab_cat').removeAttr("disabled"); $('#newznab_cat').removeAttr('disabled');
$('#newznab_cap').removeAttr("disabled"); $('#newznab_cap').removeAttr('disabled');
} }
$('#newznab_name').val(data[0]); $('#newznab_name').val(data[0]);
@@ -170,56 +170,56 @@ $(document).ready(function(){
//Check if not already array //Check if not already array
if (typeof data[3] === 'string') { if (typeof data[3] === 'string') {
rrcat = data[3].split(",") rrcat = data[3].split(',')
} }
else { else {
rrcat = data[3]; rrcat = data[3];
} }
// Update the category select box (on the right) // Update the category select box (on the right)
var newCatOptions = []; var newCatOptions = [];
if (rrcat) { if (rrcat) {
rrcat.forEach(function (cat) { rrcat.forEach(function (cat) {
newCatOptions.push({text : cat, value : cat}); newCatOptions.push({text : cat, value : cat});
}); });
$("#newznab_cat").replaceOptions(newCatOptions); $('#newznab_cat').replaceOptions(newCatOptions);
}; };
if (selectedProvider == 'addNewznab') { if (selectedProvider == 'addNewznab') {
$('#newznab_name').removeAttr("disabled"); $('#newznab_name').removeAttr('disabled');
$('#newznab_url').removeAttr("disabled"); $('#newznab_url').removeAttr('disabled');
} else { } else {
$('#newznab_name').attr("disabled", "disabled"); $('#newznab_name').attr('disabled', 'disabled');
if (isDefault) { if (isDefault) {
$('#newznab_url').attr("disabled", "disabled"); $('#newznab_url').attr('disabled', 'disabled');
$('#newznab_delete').attr("disabled", "disabled"); $('#newznab_delete').attr('disabled', 'disabled');
} else { } else {
$('#newznab_url').removeAttr("disabled"); $('#newznab_url').removeAttr('disabled');
$('#newznab_delete').removeAttr("disabled"); $('#newznab_delete').removeAttr('disabled');
//Get Categories Capabilities //Get Categories Capabilities
if (data[0] && data[1] && data[2] && !ifExists($.fn.newznabProvidersCapabilities, data[0])) { if (data[0] && data[1] && data[2] && !ifExists($.fn.newznabProvidersCapabilities, data[0])) {
$(this).getCategories(isDefault, data); $(this).getCategories(isDefault, data);
} }
else { else {
updateNewznabCaps( null, data ); updateNewznabCaps( null, data );
} }
} }
} }
} }
ifExists = function(loopThroughArray, searchFor) { ifExists = function(loopThroughArray, searchFor) {
var found = false; var found = false;
loopThroughArray.forEach(function(rootObject) { loopThroughArray.forEach(function(rootObject) {
if (rootObject.name == searchFor) { if (rootObject.name == searchFor) {
found = true; found = true;
} }
console.log(rootObject.name + " while searching for: "+ searchFor); console.log(rootObject.name + ' while searching for: ' + searchFor);
}); });
return found; return found;
}; };
/** /**
@@ -231,24 +231,24 @@ $(document).ready(function(){
*/ */
updateNewznabCaps = function( newzNabCaps, selectedProvider ) { updateNewznabCaps = function( newzNabCaps, selectedProvider ) {
if (newzNabCaps && !ifExists($.fn.newznabProvidersCapabilities, selectedProvider[0])) { if (newzNabCaps && !ifExists($.fn.newznabProvidersCapabilities, selectedProvider[0])) {
$.fn.newznabProvidersCapabilities.push({'name' : selectedProvider[0], 'categories' : newzNabCaps.tv_categories}); $.fn.newznabProvidersCapabilities.push({'name' : selectedProvider[0], 'categories' : newzNabCaps.tv_categories});
} }
//Loop through the array and if currently selected newznab provider name matches one in the array, use it to //Loop through the array and if currently selected newznab provider name matches one in the array, use it to
//update the capabilities select box (on the left). //update the capabilities select box (on the left).
if (selectedProvider[0]) { if (selectedProvider[0]) {
$.fn.newznabProvidersCapabilities.forEach(function(newzNabCap) { $.fn.newznabProvidersCapabilities.forEach(function(newzNabCap) {
if (newzNabCap.name && newzNabCap.name == selectedProvider[0] && newzNabCap.categories instanceof Array) { if (newzNabCap.name && newzNabCap.name == selectedProvider[0] && newzNabCap.categories instanceof Array) {
var newCapOptions = []; var newCapOptions = [];
newzNabCap.categories.forEach(function(category_set) { newzNabCap.categories.forEach(function(category_set) {
if (category_set.id && category_set.name) { if (category_set.id && category_set.name) {
newCapOptions.push({value : category_set.id, text : category_set.name + "(" + category_set.id + ")"}); newCapOptions.push({value : category_set.id, text : category_set.name + '(' + category_set.id + ')'});
}; };
}); });
$("#newznab_cap").replaceOptions(newCapOptions); $('#newznab_cap').replaceOptions(newCapOptions);
} }
}); });
}; };
} }
@@ -284,14 +284,14 @@ $(document).ready(function(){
$('#torrentrss_cookies').val(data[2]); $('#torrentrss_cookies').val(data[2]);
if (selectedProvider == 'addTorrentRss') { if (selectedProvider == 'addTorrentRss') {
$('#torrentrss_name').removeAttr("disabled"); $('#torrentrss_name').removeAttr('disabled');
$('#torrentrss_url').removeAttr("disabled"); $('#torrentrss_url').removeAttr('disabled');
$('#torrentrss_cookies').removeAttr("disabled"); $('#torrentrss_cookies').removeAttr('disabled');
} else { } else {
$('#torrentrss_name').attr("disabled", "disabled"); $('#torrentrss_name').attr('disabled', 'disabled');
$('#torrentrss_url').removeAttr("disabled"); $('#torrentrss_url').removeAttr('disabled');
$('#torrentrss_cookies').removeAttr("disabled"); $('#torrentrss_cookies').removeAttr('disabled');
$('#torrentrss_delete').removeAttr("disabled"); $('#torrentrss_delete').removeAttr('disabled');
} }
} }
@@ -309,14 +309,14 @@ $(document).ready(function(){
$.fn.refreshProviderList = function() { $.fn.refreshProviderList = function() {
var idArr = $("#provider_order_list").sortable('toArray'); var idArr = $('#provider_order_list').sortable('toArray');
var finalArr = new Array(); var finalArr = new Array();
$.each(idArr, function(key, val) { $.each(idArr, function(key, val) {
var checked = + $('#enable_'+val).prop('checked') ? '1' : '0'; var checked = + $('#enable_' + val).prop('checked') ? '1' : '0';
finalArr.push(val + ':' + checked); finalArr.push(val + ':' + checked);
}); });
$("#provider_order").val(finalArr.join(' ')); $('#provider_order').val(finalArr.join(' '));
} }
var newznabProviders = new Array(); var newznabProviders = new Array();
@@ -327,8 +327,8 @@ $(document).ready(function(){
var provider_id = $(this).attr('id'); var provider_id = $(this).attr('id');
provider_id = provider_id.substring(0, provider_id.length-'_hash'.length); provider_id = provider_id.substring(0, provider_id.length-'_hash'.length);
var url = $('#'+provider_id+'_url').val(); var url = $('#' + provider_id + '_url').val();
var cat = $('#'+provider_id+'_cat').val(); var cat = $('#' + provider_id + '_cat').val();
var key = $(this).val(); var key = $(this).val();
$(this).updateProvider(provider_id, url, key, cat); $(this).updateProvider(provider_id, url, key, cat);
@@ -339,15 +339,15 @@ $(document).ready(function(){
var selectedProvider = $('#editANewznabProvider :selected').val(); var selectedProvider = $('#editANewznabProvider :selected').val();
if (selectedProvider == "addNewznab") if (selectedProvider == 'addNewznab')
return; return;
var url = $('#newznab_url').val(); var url = $('#newznab_url').val();
var key = $('#newznab_key').val(); var key = $('#newznab_key').val();
var cat = $('#newznab_cat option').map(function(i, opt) { var cat = $('#newznab_cat option').map(function(i, opt) {
return $(opt).text(); return $(opt).text();
}).toArray().join(','); }).toArray().join(',');
$(this).updateProvider(selectedProvider, url, key, cat); $(this).updateProvider(selectedProvider, url, key, cat);
@@ -357,7 +357,7 @@ $(document).ready(function(){
var selectedProvider = $('#editATorrentRssProvider :selected').val(); var selectedProvider = $('#editATorrentRssProvider :selected').val();
if (selectedProvider == "addTorrentRss") if (selectedProvider == 'addTorrentRss')
return; return;
var url = $('#torrentrss_url').val(); var url = $('#torrentrss_url').val();
@@ -387,38 +387,38 @@ $(document).ready(function(){
console.debug('Clicked Button'); console.debug('Clicked Button');
//Maybe check if there is anything selected? //Maybe check if there is anything selected?
$("#newznab_cat option").each(function() { $('#newznab_cat option').each(function() {
$(this).remove(); $(this).remove();
return; return;
}); });
var newOptions = []; var newOptions = [];
// When the update button is clicked, loop through the capabilities list // When the update button is clicked, loop through the capabilities list
// and copy the selected category ids to the category list on the right. // and copy the selected category ids to the category list on the right.
$("#newznab_cap option").each(function(){ $('#newznab_cap option').each(function(){
if($(this).attr('selected') == 'selected') if($(this).attr('selected') == 'selected')
{ {
var selected_cat = $(this).val(); var selected_cat = $(this).val();
console.debug(selected_cat); console.debug(selected_cat);
newOptions.push({text: selected_cat, value: selected_cat}) newOptions.push({text: selected_cat, value: selected_cat})
}; };
}); });
$("#newznab_cat").replaceOptions(newOptions); $('#newznab_cat').replaceOptions(newOptions);
var selectedProvider = $('#editANewznabProvider :selected').val(); var selectedProvider = $('#editANewznabProvider :selected').val();
if (selectedProvider == "addNewznab") if (selectedProvider == 'addNewznab')
return; return;
var url = $('#newznab_url').val(); var url = $('#newznab_url').val();
var key = $('#newznab_key').val(); var key = $('#newznab_key').val();
var cat = $('#newznab_cat option').map(function(i, opt) { var cat = $('#newznab_cat option').map(function(i, opt) {
return $(opt).text(); return $(opt).text();
}).toArray().join(','); }).toArray().join(',');
$("#newznab_cat option:not([value])").remove(); $('#newznab_cat option:not([value])').remove();
$(this).updateProvider(selectedProvider, url, key, cat); $(this).updateProvider(selectedProvider, url, key, cat);
@@ -435,17 +435,17 @@ $(document).ready(function(){
//var cat = $.trim($('#newznab_cat').val()); //var cat = $.trim($('#newznab_cat').val());
var cat = $.trim($('#newznab_cat option').map(function(i, opt) { var cat = $.trim($('#newznab_cat option').map(function(i, opt) {
return $(opt).text();}).toArray().join(',')); return $(opt).text();}).toArray().join(','));
if (!name) if (!name)
return; return;
if (!url) if (!url)
return; return;
if (!key) if (!key)
return; return;
var params = {name: name}; var params = {name: name};
@@ -496,12 +496,12 @@ $(document).ready(function(){
}); });
$(this).on('change', "[class='providerDiv_tip'] input", function(){ $(this).on('change', '[class="providerDiv_tip"] input', function(){
$('div .providerDiv ' + "[name=" + $(this).attr('name') + "]").replaceWith($(this).clone()); $('div .providerDiv ' + '[name=' + $(this).attr('name') + ']').replaceWith($(this).clone());
$('div .providerDiv ' + "[newznab_name=" + $(this).attr('id') + "]").replaceWith($(this).clone()); $('div .providerDiv ' + '[newznab_name=' + $(this).attr('id') + ']').replaceWith($(this).clone());
}); });
$(this).on('change', "[class='providerDiv_tip'] select", function(){ $(this).on('change', '[class="providerDiv_tip"] select', function(){
$(this).find('option').each( function() { $(this).find('option').each( function() {
if ($(this).is(':selected')) { if ($(this).is(':selected')) {
@@ -511,34 +511,34 @@ $(document).ready(function(){
} }
}); });
$('div .providerDiv ' + "[name=" + $(this).attr('name') + "]").empty().replaceWith($(this).clone())}); $('div .providerDiv ' + '[name=' + $(this).attr('name') + ']').empty().replaceWith($(this).clone())});
$(this).on('change', '.enabler', function(){ $(this).on('change', '.enabler', function(){
if ($(this).is(':checked')) { if ($(this).is(':checked')) {
$('.content_'+$(this).attr('id')).each( function() { $('.content_' + $(this).attr('id')).each( function() {
$(this).show() $(this).show()
}) })
} else { } else {
$('.content_'+$(this).attr('id')).each( function() { $('.content_' + $(this).attr('id')).each( function() {
$(this).hide() $(this).hide()
}) })
} }
}); });
$(".enabler").each(function(){ $('.enabler').each(function(){
if (!$(this).is(':checked')) { if (!$(this).is(':checked')) {
$('.content_'+$(this).attr('id')).hide(); $('.content_' + $(this).attr('id')).hide();
} else { } else {
$('.content_'+$(this).attr('id')).show(); $('.content_' + $(this).attr('id')).show();
} }
}); });
$.fn.makeTorrentOptionString = function(provider_id) { $.fn.makeTorrentOptionString = function(provider_id) {
var seed_ratio = $('.providerDiv_tip #'+provider_id+'_seed_ratio').prop('value'); var seed_ratio = $('.providerDiv_tip #' + provider_id + '_seed_ratio').prop('value');
var seed_time = $('.providerDiv_tip #'+provider_id+'_seed_time').prop('value'); var seed_time = $('.providerDiv_tip #' + provider_id + '_seed_time').prop('value');
var process_met = $('.providerDiv_tip #'+provider_id+'_process_method').prop('value'); var process_met = $('.providerDiv_tip #' + provider_id + '_process_method').prop('value');
var option_string = $('.providerDiv_tip #'+provider_id+'_option_string'); var option_string = $('.providerDiv_tip #' + provider_id + '_option_string');
option_string.val([seed_ratio, seed_time, process_met].join('|')) option_string.val([seed_ratio, seed_time, process_met].join('|'))
@@ -548,7 +548,7 @@ $(document).ready(function(){
var provider_id = $(this).attr('id').split('_')[0]; var provider_id = $(this).attr('id').split('_')[0];
$(this).makeTorrentOptionString(provider_id); $(this).makeTorrentOptionString(provider_id);
}); });
@@ -560,8 +560,8 @@ $(document).ready(function(){
self = this; self = this;
$.each(options, function(index, option) { $.each(options, function(index, option) {
$option = $("<option></option>") $option = $('<option></option>')
.attr("value", option.value) .attr('value', option.value)
.text(option.text); .text(option.text);
self.append($option); self.append($option);
}); });
@@ -574,13 +574,13 @@ $(document).ready(function(){
$(this).showHideProviders(); $(this).showHideProviders();
$("#provider_order_list").sortable({ $('#provider_order_list').sortable({
placeholder: 'ui-state-highlight', placeholder: 'ui-state-highlight',
update: function (event, ui) { update: function (event, ui) {
$(this).refreshProviderList(); $(this).refreshProviderList();
} }
}); });
$("#provider_order_list").disableSelection(); $('#provider_order_list').disableSelection();
}); });
@@ -1,6 +1,5 @@
$(function () { $(function () {
$('.title a').each(function () { $('.title span').each(function () {
match = $(this).parent().attr("id").match(/^scene_exception_(\d+)$/);
$(this).qtip({ $(this).qtip({
content: { content: {
text: 'Loading...', text: 'Loading...',
@@ -20,11 +19,11 @@ $(function () {
}, },
position: { position: {
viewport: $(window), viewport: $(window),
my: 'bottom center', my: 'left middle',
at: 'top center', at: 'right middle',
adjust: { adjust: {
y: 10, y: 0,
x: 0 x: 10
} }
}, },
style: { style: {
@@ -21,6 +21,6 @@ except ImportError:
# Handle the case where the requests has been patched to not have urllib3 # Handle the case where the requests has been patched to not have urllib3
# bundled as part of its source. # bundled as part of its source.
try: try:
from requests.packages.urllib3.response import HTTPResponse from lib.requests.packages.urllib3.response import HTTPResponse
except ImportError: except ImportError:
from urllib3.response import HTTPResponse from urllib3.response import HTTPResponse
@@ -441,6 +441,12 @@ class DOMParserBase(object):
self._useModule = useModule self._useModule = useModule
nrMods = len(useModule) nrMods = len(useModule)
_gotError = False _gotError = False
# Force warnings.warn() to omit the source code line in the message
formatwarning_orig = warnings.formatwarning
warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
formatwarning_orig(message, category, filename, lineno, line='')
for idx, mod in enumerate(useModule): for idx, mod in enumerate(useModule):
mod = mod.strip().lower() mod = mod.strip().lower()
try: try:
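The wrapper added above silences the source-line echo for every warning the parser raises. The same idea stands alone as a short sketch (the warning text is illustrative):

    import warnings

    # keep the message but drop the quoted source line from warnings output
    formatwarning_orig = warnings.formatwarning
    warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
        formatwarning_orig(message, category, filename, lineno, line='')

    warnings.warn('falling back to the builtin parser')  # no code line is printed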
@@ -13,7 +13,7 @@ Requests is an HTTP library, written in Python, for human beings. Basic GET
usage: usage:
>>> import requests >>> import requests
>>> r = requests.get('http://python.org') >>> r = requests.get('https://www.python.org')
>>> r.status_code >>> r.status_code
200 200
>>> 'Python is a programming language' in r.content >>> 'Python is a programming language' in r.content
@@ -22,7 +22,7 @@ usage:
... or POST: ... or POST:
>>> payload = dict(key1='value1', key2='value2') >>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload) >>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text) >>> print(r.text)
{ {
... ...
@@ -42,8 +42,8 @@ is at <http://python-requests.org>.
""" """
__title__ = 'requests' __title__ = 'requests'
__version__ = '2.3.0' __version__ = '2.4.3'
__build__ = 0x020300 __build__ = 0x020403
__author__ = 'Kenneth Reitz' __author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0' __license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Kenneth Reitz' __copyright__ = 'Copyright 2014 Kenneth Reitz'
@@ -9,23 +9,26 @@ and maintain connections.
""" """
import socket import socket
import copy
from .models import Response from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring, urldefrag, unquote from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
except_on_missing_scheme, get_auth_from_url) prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .cookies import extract_cookies_to_jar from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError, ProxyError from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError)
from .auth import _basic_auth_str from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False DEFAULT_POOLBLOCK = False
@@ -57,13 +60,15 @@ class HTTPAdapter(BaseAdapter):
:param pool_connections: The number of urllib3 connection pools to cache. :param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool. :param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection :param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and should attempt. Note, this applies only to failed DNS lookups, socket
timeouts, never to requests where the server returns a response. connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections.
:param pool_block: Whether the connection pool should block for connections. :param pool_block: Whether the connection pool should block for connections.
Usage:: Usage::
>>> import lib.requests >>> import requests
>>> s = requests.Session() >>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a) >>> s.mount('http://', a)
@@ -102,14 +107,17 @@ class HTTPAdapter(BaseAdapter):
self.init_poolmanager(self._pool_connections, self._pool_maxsize, self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block) block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK): def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager. This method should not be called """Initializes a urllib3 PoolManager.
from user code, and is only exposed for use when subclassing the
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache. :param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool. :param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available. :param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
""" """
# save these values for pickling # save these values for pickling
self._pool_connections = connections self._pool_connections = connections
@@ -117,7 +125,30 @@ class HTTPAdapter(BaseAdapter):
self._pool_block = block self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block) block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if not proxy in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
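One ProxyManager (with its connection pools) is now cached per proxy URL for the life of the adapter. A minimal sketch of the calling pattern, assuming a local proxy at 127.0.0.1:8080:

    import requests

    s = requests.Session()
    proxies = {'http': 'http://127.0.0.1:8080'}  # assumed proxy, for illustration
    # both calls reuse the ProxyManager created on the first request
    s.get('http://example.com/', proxies=proxies)
    s.get('http://example.org/', proxies=proxies)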
def cert_verify(self, conn, url, verify, cert): def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user """Verify a SSL certificate. This method should not be called from user
@@ -204,18 +235,9 @@ class HTTPAdapter(BaseAdapter):
proxy = proxies.get(urlparse(url.lower()).scheme) proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy: if proxy:
except_on_missing_scheme(proxy) proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_headers = self.proxy_headers(proxy) proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
if not proxy in self.proxy_manager:
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block)
conn = self.proxy_manager[proxy].connection_from_url(url)
else: else:
# Only scheme should be lower case # Only scheme should be lower case
parsed = urlparse(url) parsed = urlparse(url)
@@ -250,7 +272,7 @@ class HTTPAdapter(BaseAdapter):
proxy = proxies.get(scheme) proxy = proxies.get(scheme)
if proxy and scheme != 'https': if proxy and scheme != 'https':
url, _ = urldefrag(request.url) url = urldefragauth(request.url)
else: else:
url = request.path_url url = request.path_url
@@ -297,7 +319,10 @@ class HTTPAdapter(BaseAdapter):
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content. :param stream: (optional) Whether to stream the request content.
:param timeout: (optional) The timeout on the request. :param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates. :param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted. :param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request. :param proxies: (optional) The proxies dictionary to apply to the request.
@@ -311,7 +336,18 @@ class HTTPAdapter(BaseAdapter):
chunked = not (request.body is None or 'Content-Length' in request.headers) chunked = not (request.body is None or 'Content-Length' in request.headers)
timeout = TimeoutSauce(connect=timeout, read=timeout) if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try: try:
if not chunked: if not chunked:
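With the branch above, timeout now accepts a (connect, read) tuple as well as a single float; for example:

    import requests

    try:
        # allow 3.05s to connect, then 1s for the server to start replying
        requests.get('http://httpbin.org/delay/10', timeout=(3.05, 1))
    except requests.exceptions.ReadTimeout:
        print('connected, but the read timed out')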
@@ -324,7 +360,7 @@ class HTTPAdapter(BaseAdapter):
assert_same_host=False, assert_same_host=False,
preload_content=False, preload_content=False,
decode_content=False, decode_content=False,
retries=self.max_retries, retries=Retry(self.max_retries, read=False),
timeout=timeout timeout=timeout
) )
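Wrapping max_retries in a urllib3 Retry with read=False restricts retries to the connection phase, so a request the server may already have received is never re-sent. Typical use is unchanged:

    import requests
    from requests.adapters import HTTPAdapter

    s = requests.Session()
    # retry failed connection attempts up to 3 times; reads are not retried
    s.mount('http://', HTTPAdapter(max_retries=3))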
@@ -369,10 +405,13 @@ class HTTPAdapter(BaseAdapter):
# All is well, return the connection to the pool. # All is well, return the connection to the pool.
conn._put_conn(low_conn) conn._put_conn(low_conn)
except socket.error as sockerr: except (ProtocolError, socket.error) as err:
raise ConnectionError(sockerr, request=request) raise ConnectionError(err, request=request)
except MaxRetryError as e: except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request)
raise ConnectionError(e, request=request) raise ConnectionError(e, request=request)
except _ProxyError as e: except _ProxyError as e:
@@ -381,14 +420,9 @@ class HTTPAdapter(BaseAdapter):
except (_SSLError, _HTTPError) as e: except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError): if isinstance(e, _SSLError):
raise SSLError(e, request=request) raise SSLError(e, request=request)
elif isinstance(e, TimeoutError): elif isinstance(e, ReadTimeoutError):
raise Timeout(e, request=request) raise ReadTimeout(e, request=request)
else: else:
raise raise
r = self.build_response(request, resp) return self.build_response(request, resp)
if not stream:
r.content
return r
@@ -22,12 +22,17 @@ def request(method, url, **kwargs):
:param url: URL for the new :class:`Request` object. :param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request in seconds. :param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded. :param stream: (optional) if ``False``, the response content will be immediately downloaded.
@@ -41,7 +46,12 @@ def request(method, url, **kwargs):
""" """
session = sessions.Session() session = sessions.Session()
return session.request(method=method, url=url, **kwargs) response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, **kwargs): def get(url, **kwargs):
@@ -77,15 +87,16 @@ def head(url, **kwargs):
return request('head', url, **kwargs) return request('head', url, **kwargs)
def post(url, data=None, **kwargs): def post(url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object. """Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object. :param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes. :param \*\*kwargs: Optional arguments that ``request`` takes.
""" """
return request('post', url, data=data, **kwargs) return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs): def put(url, data=None, **kwargs):
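The new json keyword serialises the payload and sets the JSON content type, saving a manual json.dumps; a quick sketch:

    import requests

    r = requests.post('http://httpbin.org/post', json={'key1': 'value1'})
    print(r.json()['json'])  # -> {'key1': 'value1'}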
@@ -16,7 +16,8 @@ from base64 import b64encode
from .compat import urlparse, str from .compat import urlparse, str
from .cookies import extract_cookies_to_jar from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data' CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
@@ -25,7 +26,11 @@ CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password): def _basic_auth_str(username, password):
"""Returns a Basic Auth string.""" """Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1') authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
class AuthBase(object): class AuthBase(object):
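to_native_string keeps the header value a native str on both Python 2 and 3; the computed value itself is unchanged:

    from base64 import b64encode

    # what _basic_auth_str builds, shown standalone
    auth = 'Basic ' + b64encode(('%s:%s' % ('user', 'pass')).encode('latin1')).strip().decode('latin1')
    # auth -> 'Basic dXNlcjpwYXNz'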
@@ -146,6 +151,11 @@ class HTTPDigestAuth(AuthBase):
return 'Digest %s' % (base) return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
setattr(self, 'num_401_calls', 1)
def handle_401(self, r, **kwargs): def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed.""" """Takes the given response and tries digest-auth, if needed."""
@@ -178,7 +188,7 @@
return _r return _r
setattr(self, 'num_401_calls', 1) setattr(self, 'num_401_calls', num_401_calls + 1)
return r return r
def __call__(self, r): def __call__(self, r):
@@ -188,6 +198,11 @@
try: try:
self.pos = r.body.tell() self.pos = r.body.tell()
except AttributeError: except AttributeError:
pass # If HTTPDigestAuth is reused after a request whose body was
# a file-like object, pos still holds the file position of that
# previous body. Ensure it's reset to None.
self.pos = None
r.register_hook('response', self.handle_401) r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r return r
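Together, ``handle_redirect`` and the ``self.pos = None`` reset make a single ``HTTPDigestAuth`` instance safe to reuse, even after a request whose body was a file-like object. A sketch; the httpbin.org URL and credentials are placeholders:

    import requests
    from requests.auth import HTTPDigestAuth

    auth = HTTPDigestAuth('user', 'pass')  # one instance, reused below
    url = 'http://httpbin.org/digest-auth/auth/user/pass'
    r1 = requests.get(url, auth=auth)
    # The second call no longer inherits a stale num_401_calls counter
    # or a leftover file position from a previous file-backed body.
    r2 = requests.get(url, auth=auth)
    print(r1.status_code, r2.status_code)  # 200 200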
@ -11,14 +11,15 @@ If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately environment, you can change the definition of where() to return a separately
packaged CA bundle. packaged CA bundle.
""" """
import os.path import os.path
try:
def where(): from certifi import where
"""Return the preferred certificate bundle.""" except ImportError:
# vendored bundle inside Requests def where():
return os.path.join(os.path.dirname(__file__), 'cacert.pem') """Return the preferred certificate bundle."""
# vendored bundle inside Requests
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
if __name__ == '__main__': if __name__ == '__main__':
print(where()) print(where())
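With this change a system-installed certifi package takes precedence and the vendored cacert.pem becomes the fallback; callers see the same interface either way:

    import requests.certs

    # Path of the CA bundle used for verification: certifi's copy if
    # importable, otherwise the bundle shipped inside Requests.
    print(requests.certs.where())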
@ -75,7 +75,9 @@ is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try: try:
import simplejson as json import simplejson as json
except ImportError: except (ImportError, SyntaxError):
# simplejson does not support Python 3.2; it throws a SyntaxError
# because of u'...' Unicode literals.
import json import json
# --------- # ---------
@ -90,7 +92,6 @@ if is_py2:
from Cookie import Morsel from Cookie import Morsel
from StringIO import StringIO from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str builtin_str = str
bytes = str bytes = str
@ -106,7 +107,6 @@ elif is_py3:
from http.cookies import Morsel from http.cookies import Morsel
from io import StringIO from io import StringIO
from collections import OrderedDict from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str builtin_str = str
str = str str = str
@ -44,7 +44,23 @@ class SSLError(ConnectionError):
class Timeout(RequestException): class Timeout(RequestException):
"""The request timed out.""" """The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException): class URLRequired(RequestException):
@ -73,3 +89,6 @@ class ChunkedEncodingError(RequestException):
class ContentDecodingError(RequestException, BaseHTTPError): class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content""" """Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed"""
@ -19,31 +19,36 @@ from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import DecodeError from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError)
from .exceptions import ( from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL, HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError, ContentDecodingError) ChunkedEncodingError, ContentDecodingError, ConnectionError,
StreamConsumedError)
from .utils import ( from .utils import (
guess_filename, get_auth_from_url, requote_uri, guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links, stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string) iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import ( from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO, cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead) is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically #: The set of HTTP status codes that indicate an automatically
#: processable redirect. #: processable redirect.
REDIRECT_STATI = ( REDIRECT_STATI = (
codes.moved, # 301 codes.moved, # 301
codes.found, # 302 codes.found, # 302
codes.other, # 303 codes.other, # 303
codes.temporary_moved, # 307 codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
) )
DEFAULT_REDIRECT_LIMIT = 30 DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024 CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512 ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object): class RequestEncodingMixin(object):
@property @property
@ -187,7 +192,8 @@ class Request(RequestHooksMixin):
:param url: URL to send. :param url: URL to send.
:param headers: dictionary of headers to send. :param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload. :param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place. :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL. :param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple. :param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request. :param cookies: dictionary or CookieJar of cookies to attach to this request.
@ -210,7 +216,8 @@ class Request(RequestHooksMixin):
params=None, params=None,
auth=None, auth=None,
cookies=None, cookies=None,
hooks=None): hooks=None,
json=None):
# Default empty dicts for dict params. # Default empty dicts for dict params.
data = [] if data is None else data data = [] if data is None else data
@ -228,6 +235,7 @@ class Request(RequestHooksMixin):
self.headers = headers self.headers = headers
self.files = files self.files = files
self.data = data self.data = data
self.json = json
self.params = params self.params = params
self.auth = auth self.auth = auth
self.cookies = cookies self.cookies = cookies
@ -244,6 +252,7 @@ class Request(RequestHooksMixin):
headers=self.headers, headers=self.headers,
files=self.files, files=self.files,
data=self.data, data=self.data,
json=self.json,
params=self.params, params=self.params,
auth=self.auth, auth=self.auth,
cookies=self.cookies, cookies=self.cookies,
@ -287,14 +296,15 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.hooks = default_hooks() self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None, def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None): data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters.""" """Prepares the entire request with the given parameters."""
self.prepare_method(method) self.prepare_method(method)
self.prepare_url(url, params) self.prepare_url(url, params)
self.prepare_headers(headers) self.prepare_headers(headers)
self.prepare_cookies(cookies) self.prepare_cookies(cookies)
self.prepare_body(data, files) self.prepare_body(data, files, json)
self.prepare_auth(auth, url) self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes # Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request. # such as OAuth to work on a fully prepared request.
@ -309,8 +319,8 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
p = PreparedRequest() p = PreparedRequest()
p.method = self.method p.method = self.method
p.url = self.url p.url = self.url
p.headers = self.headers.copy() p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body p.body = self.body
p.hooks = self.hooks p.hooks = self.hooks
return p return p
@ -324,15 +334,18 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
def prepare_url(self, url, params): def prepare_url(self, url, params):
"""Prepares the given HTTP URL.""" """Prepares the given HTTP URL."""
#: Accept objects that have string representations. #: Accept objects that have string representations.
try: #: We're unable to blindly call unicode/str functions
url = unicode(url) #: as this will include the bytestring indicator (b'')
except NameError: #: on python 3.x.
# We're on Python 3. #: https://github.com/kennethreitz/requests/pull/2238
url = str(url) if isinstance(url, bytes):
except UnicodeDecodeError: url = url.decode('utf8')
pass else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for oddball schemes # Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc. to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'): if ':' in url and not url.lower().startswith('http'):
self.url = url self.url = url
return return
@ -395,7 +408,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
else: else:
self.headers = CaseInsensitiveDict() self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files): def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data.""" """Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator. # Check if file, fo, generator, iterator.
@ -406,11 +419,13 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
content_type = None content_type = None
length = None length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([ is_stream = all([
hasattr(data, '__iter__'), hasattr(data, '__iter__'),
not isinstance(data, basestring), not isinstance(data, (basestring, list, tuple, dict))
not isinstance(data, list),
not isinstance(data, dict)
]) ])
try: try:
@ -433,9 +448,9 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
if files: if files:
(body, content_type) = self._encode_files(files, data) (body, content_type) = self._encode_files(files, data)
else: else:
if data: if data and json is None:
body = self._encode_params(data) body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'): if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None content_type = None
else: else:
content_type = 'application/x-www-form-urlencoded' content_type = 'application/x-www-form-urlencoded'
@ -443,7 +458,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
self.prepare_content_length(body) self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided. # Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers): if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type self.headers['Content-Type'] = content_type
self.body = body self.body = body
@ -457,7 +472,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
l = super_len(body) l = super_len(body)
if l: if l:
self.headers['Content-Length'] = builtin_str(l) self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'): elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0' self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''): def prepare_auth(self, auth, url=''):
@ -558,6 +573,10 @@ class Response(object):
#: and the arrival of the response (as a timedelta) #: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0) self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self): def __getstate__(self):
# Consume everything; accessing the content attribute makes # Consume everything; accessing the content attribute makes
# sure the content has been fully read. # sure the content has been fully read.
@ -607,6 +626,11 @@ class Response(object):
""" """
return ('location' in self.headers and self.status_code in REDIRECT_STATI) return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
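A short sketch of the new property for callers that handle redirects manually; the URL is illustrative:

    import requests

    r = requests.get('http://example.com/old-path', allow_redirects=False)
    if r.is_permanent_redirect:  # 301 or 308 with a Location header
        print('update the stored URL to', r.headers['location'])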
@property @property
def apparent_encoding(self): def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library""" """The apparent encoding, provided by the chardet library"""
@ -618,21 +642,22 @@ class Response(object):
large responses. The chunk size is the number of bytes it should large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item read into memory. This is not necessarily the length of each item
returned as decoding can take place. returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate(): def generate():
try: try:
# Special case for urllib3. # Special case for urllib3.
try: try:
for chunk in self.raw.stream(chunk_size, decode_content=True): for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk yield chunk
except IncompleteRead as e: except ProtocolError as e:
raise ChunkedEncodingError(e) raise ChunkedEncodingError(e)
except DecodeError as e: except DecodeError as e:
raise ContentDecodingError(e) raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError: except AttributeError:
# Standard file-like object. # Standard file-like object.
while True: while True:
@ -643,14 +668,21 @@ class Response(object):
self._content_consumed = True self._content_consumed = True
gen = generate() if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode: if decode_unicode:
gen = stream_decode_response_unicode(gen, self) chunks = stream_decode_response_unicode(chunks, self)
return gen return chunks
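After this rework, calling ``iter_content`` on an already-loaded body replays the cached content in ``chunk_size`` slices; ``StreamConsumedError`` is raised only when a genuine stream was drained. For example:

    import requests

    r = requests.get('http://example.com/')  # body fully read, not streamed
    first = list(r.iter_content(chunk_size=1024))
    again = list(r.iter_content(chunk_size=1024))  # replayed, no error
    assert b''.join(first) == b''.join(again) == r.content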
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None): def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When """Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the stream=True is set on the request, this avoids reading the
content at once into memory for large responses. content at once into memory for large responses.
@ -662,7 +694,11 @@ class Response(object):
if pending is not None: if pending is not None:
chunk = pending + chunk chunk = pending + chunk
lines = chunk.splitlines()
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop() pending = lines.pop()
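The new ``delimiter`` argument splits the stream on an arbitrary separator instead of line endings; its type must match the chunk type (bytes unless ``decode_unicode`` is set). A sketch with NUL-separated records; the endpoint is illustrative:

    import requests

    r = requests.get('http://example.com/records', stream=True)
    for record in r.iter_lines(delimiter=b'\x00'):
        if record:  # skip empties between adjacent delimiters
            print(record)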
@ -793,8 +829,8 @@ class Response(object):
raise HTTPError(http_error_msg, response=self) raise HTTPError(http_error_msg, response=self)
def close(self): def close(self):
"""Closes the underlying file descriptor and releases the connection """Releases the connection back to the pool. Once this method has been
back to the pool. called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.* *Note: Should not normally need to be called explicitly.*
""" """
@ -1,9 +1,3 @@
# urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
""" """
urllib3 - Thread-safe connection pooling and re-using. urllib3 - Thread-safe connection pooling and re-using.
""" """
@ -23,7 +17,10 @@ from . import exceptions
from .filepost import encode_multipart_formdata from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse from .response import HTTPResponse
from .util import make_headers, get_host, Timeout from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings. # Set default logging handler to avoid "No handler found" warnings.
@ -51,8 +48,19 @@ def add_stderr_logger(level=logging.DEBUG):
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler) logger.addHandler(handler)
logger.setLevel(level) logger.setLevel(level)
logger.debug('Added an stderr logging handler to logger: %s' % __name__) logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler return handler
# ... Clean up. # ... Clean up.
del NullHandler del NullHandler
# Set security warning to only go off once by default.
import warnings
warnings.simplefilter('module', exceptions.SecurityWarning)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
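This helper is what makes the changelog's InsecureRequestWarning suppression a one-liner for callers that knowingly talk to self-signed hosts; the host below is illustrative:

    import requests
    from requests.packages import urllib3

    urllib3.disable_warnings()  # ignore urllib3 HTTPWarning subclasses
    requests.get('https://self-signed.example/', verify=False)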
@ -1,10 +1,4 @@
# urllib3/_collections.py from collections import Mapping, MutableMapping
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
try: try:
from threading import RLock from threading import RLock
except ImportError: # Platform-specific: No threads available except ImportError: # Platform-specific: No threads available
@ -20,9 +14,10 @@ try: # Python 2.7+
from collections import OrderedDict from collections import OrderedDict
except ImportError: except ImportError:
from .packages.ordered_dict import OrderedDict from .packages.ordered_dict import OrderedDict
from .packages.six import itervalues
__all__ = ['RecentlyUsedContainer'] __all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object() _Null = object()
@ -101,3 +96,104 @@ class RecentlyUsedContainer(MutableMapping):
def keys(self): def keys(self):
with self.lock: with self.lock:
return self._container.keys() return self._container.keys()
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict``-like container for storing HTTP headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
If you want to access the raw headers with their original casing
for debugging purposes you can access the private ``._data`` attribute
which is a normal python ``dict`` that maps the case-insensitive key to a
list of tuples stored as (case-sensitive-original-name, value). Using the
structure from above as our example:
>>> headers._data
{'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
self._data = {}
if headers is None:
headers = {}
self.update(headers, **kwargs)
def add(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
self._data.setdefault(key.lower(), []).append((key, value))
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
return self[key].split(', ') if key in self else []
def copy(self):
h = HTTPHeaderDict()
for key in self._data:
for rawkey, value in self._data[key]:
h.add(rawkey, value)
return h
def __eq__(self, other):
if not isinstance(other, Mapping):
return False
other = HTTPHeaderDict(other)
return dict((k1, self[k1]) for k1 in self._data) == \
dict((k2, other[k2]) for k2 in other._data)
def __getitem__(self, key):
values = self._data[key.lower()]
return ', '.join(value[1] for value in values)
def __setitem__(self, key, value):
self._data[key.lower()] = [(key, value)]
def __delitem__(self, key):
del self._data[key.lower()]
def __len__(self):
return len(self._data)
def __iter__(self):
for headers in itervalues(self._data):
yield headers[0][0]
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
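Beyond the doctest above, ``getlist`` recovers the individual values that ``__getitem__`` joins. A sketch; the import path assumes urllib3 is used stand-alone rather than vendored:

    from urllib3._collections import HTTPHeaderDict

    headers = HTTPHeaderDict()
    headers.add('Set-Cookie', 'foo=bar')
    headers.add('set-cookie', 'baz=quxx')  # same field, kept, joined on read
    print(headers['Set-Cookie'])           # foo=bar, baz=quxx
    print(headers.getlist('set-cookie'))   # ['foo=bar', 'baz=quxx']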
@ -1,88 +1,146 @@
# urllib3/connection.py import datetime
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) import sys
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import socket import socket
from socket import timeout as SocketTimeout from socket import timeout as SocketTimeout
import warnings
try: # Python 3 try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError: except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object): class DummyConnection(object):
"Used to detect a failed ConnectionCls import." "Used to detect a failed ConnectionCls import."
pass pass
try: # Compiled with SSL?
ssl = None try: # Compiled with SSL?
HTTPSConnection = DummyConnection HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException): class BaseSSLError(BaseException):
pass pass
try: # Python 3
from http.client import HTTPSConnection as _HTTPSConnection
except ImportError:
from httplib import HTTPSConnection as _HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .exceptions import ( from .exceptions import (
ConnectTimeoutError, ConnectTimeoutError,
SystemTimeWarning,
) )
from .packages.ssl_match_hostname import match_hostname from .packages.ssl_match_hostname import match_hostname
from .util import ( from .packages import six
assert_fingerprint,
from .util.ssl_ import (
resolve_cert_reqs, resolve_cert_reqs,
resolve_ssl_version, resolve_ssl_version,
ssl_wrap_socket, ssl_wrap_socket,
assert_fingerprint,
) )
from .util import connection
port_by_scheme = { port_by_scheme = {
'http': 80, 'http': 80,
'https': 443, 'https': 443,
} }
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object): class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http'] default_port = port_by_scheme['http']
# By default, disable Nagle's Algorithm. #: Disable Nagle's algorithm by default.
tcp_nodelay = 1 #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self): def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it """ Establish a socket connection and set nodelay settings on it.
:return: a new socket connection :return: New socket connection.
""" """
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try: try:
conn = socket.create_connection( conn = connection.create_connection(
(self.host, self.port), (self.host, self.port), self.timeout, **extra_kw)
self.timeout,
self.source_address, except SocketTimeout:
) raise ConnectTimeoutError(
except AttributeError: # Python 2.6 self, "Connection to %s timed out. (connect timeout=%s)" %
conn = socket.create_connection( (self.host, self.timeout))
(self.host, self.port),
self.timeout,
)
conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
self.tcp_nodelay)
return conn return conn
def _prepare_conn(self, conn): def _prepare_conn(self, conn):
self.sock = conn self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None): if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state. # TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel() self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self): def connect(self):
conn = self._new_conn() conn = self._new_conn()
@ -93,15 +151,18 @@ class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https'] default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None, def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
source_address=None):
try: HTTPConnection.__init__(self, host, port, strict=strict,
HTTPConnection.__init__(self, host, port, strict, timeout, source_address) timeout=timeout, **kw)
except TypeError: # Python 2.6
HTTPConnection.__init__(self, host, port, strict, timeout)
self.key_file = key_file self.key_file = key_file
self.cert_file = cert_file self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self): def connect(self):
conn = self._new_conn() conn = self._new_conn()
self._prepare_conn(conn) self._prepare_conn(conn)
@ -116,6 +177,7 @@ class VerifiedHTTPSConnection(HTTPSConnection):
cert_reqs = None cert_reqs = None
ca_certs = None ca_certs = None
ssl_version = None ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None, def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None, cert_reqs=None, ca_certs=None,
@ -130,46 +192,52 @@ class VerifiedHTTPSConnection(HTTPSConnection):
def connect(self): def connect(self):
# Add certificate verification # Add certificate verification
try: conn = self._new_conn()
sock = socket.create_connection(
address=(self.host, self.port),
timeout=self.timeout,
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
self.tcp_nodelay)
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version) resolved_ssl_version = resolve_ssl_version(self.ssl_version)
# the _tunnel_host attribute was added in python 2.6.3 (via hostname = self.host
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None): if getattr(self, '_tunnel_host', None):
self.sock = sock # _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is # Calls self._set_hostport(), so self.host is
# self._tunnel_host below. # self._tunnel_host below.
self._tunnel() self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in # Wrap socket using verification with the root certs in
# trusted_root_certs # trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file, self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs, cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs, ca_certs=self.ca_certs,
server_hostname=self.host, server_hostname=hostname,
ssl_version=resolved_ssl_version) ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE: if self.assert_fingerprint:
if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True),
assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint)
self.assert_fingerprint) elif resolved_cert_reqs != ssl.CERT_NONE \
elif self.assert_hostname is not False: and self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(), match_hostname(self.sock.getpeercert(),
self.assert_hostname or self.host) self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None)
if ssl: if ssl:
@ -1,16 +1,12 @@
# urllib3/connectionpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import errno import errno
import logging import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout from socket import error as SocketError, timeout as SocketTimeout
import socket import socket
try: # Python 3 try: # Python 3
from queue import LifoQueue, Empty, Full from queue import LifoQueue, Empty, Full
except ImportError: except ImportError:
from Queue import LifoQueue, Empty, Full from Queue import LifoQueue, Empty, Full
@ -19,14 +15,16 @@ except ImportError:
from .exceptions import ( from .exceptions import (
ClosedPoolError, ClosedPoolError,
ConnectTimeoutError, ProtocolError,
EmptyPoolError, EmptyPoolError,
HostChangedError, HostChangedError,
LocationValueError,
MaxRetryError, MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError, SSLError,
TimeoutError, TimeoutError,
ReadTimeoutError, InsecureRequestWarning,
ProxyError,
) )
from .packages.ssl_match_hostname import CertificateError from .packages.ssl_match_hostname import CertificateError
from .packages import six from .packages import six
@ -38,12 +36,11 @@ from .connection import (
) )
from .request import RequestMethods from .request import RequestMethods
from .response import HTTPResponse from .response import HTTPResponse
from .util import (
assert_fingerprint, from .util.connection import is_connection_dropped
get_host, from .util.retry import Retry
is_connection_dropped, from .util.timeout import Timeout
Timeout, from .util.url import get_host
)
xrange = six.moves.xrange xrange = six.moves.xrange
@ -52,8 +49,8 @@ log = logging.getLogger(__name__)
_Default = object() _Default = object()
## Pool objects
## Pool objects
class ConnectionPool(object): class ConnectionPool(object):
""" """
Base class for all connection pools, such as Base class for all connection pools, such as
@ -64,10 +61,11 @@ class ConnectionPool(object):
QueueCls = LifoQueue QueueCls = LifoQueue
def __init__(self, host, port=None): def __init__(self, host, port=None):
# httplib doesn't like it when we include brackets in ipv6 addresses if not host:
host = host.strip('[]') raise LocationValueError("No host specified.")
self.host = host # httplib doesn't like it when we include brackets in ipv6 addresses
self.host = host.strip('[]')
self.port = port self.port = port
def __str__(self): def __str__(self):
@ -77,6 +75,7 @@ class ConnectionPool(object):
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods): class HTTPConnectionPool(ConnectionPool, RequestMethods):
""" """
Thread-safe connection pool for one host. Thread-safe connection pool for one host.
@ -121,6 +120,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
Headers to include with all requests, unless other headers are given Headers to include with all requests, unless other headers are given
explicitly. explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy: :param _proxy:
Parsed proxy URL, should not be used directly, instead, see Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`" :class:`urllib3.connectionpool.ProxyManager`"
@ -128,6 +130,10 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
:param _proxy_headers: :param _proxy_headers:
A dictionary with proxy headers, should not be used directly, A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`" instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
""" """
scheme = 'http' scheme = 'http'
@ -135,18 +141,22 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
def __init__(self, host, port=None, strict=False, def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, _proxy=None, _proxy_headers=None): headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port) ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers) RequestMethods.__init__(self, headers)
self.strict = strict self.strict = strict
# This is for backwards compatibility and can be removed once a timeout
# can only be set to a Timeout object
if not isinstance(timeout, Timeout): if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout) timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize) self.pool = self.QueueCls(maxsize)
self.block = block self.block = block
@ -161,6 +171,13 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# These are mostly for testing and debugging purposes. # These are mostly for testing and debugging purposes.
self.num_connections = 0 self.num_connections = 0
self.num_requests = 0 self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
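The new ``**conn_kw`` plumbing is what carries ``socket_options`` from a pool down to each connection. A sketch that keeps the TCP_NODELAY default and adds keep-alive, mirroring the connection.py docstring above; example.com is illustrative:

    import socket
    from urllib3 import HTTPConnectionPool
    from urllib3.connection import HTTPConnection

    opts = HTTPConnection.default_socket_options + [
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]
    pool = HTTPConnectionPool('example.com', 80, socket_options=opts)
    r = pool.urlopen('GET', '/')
    print(r.status)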
def _new_conn(self): def _new_conn(self):
""" """
@ -170,17 +187,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
log.info("Starting new HTTP connection (%d): %s" % log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host)) (self.num_connections, self.host))
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
conn = self.ConnectionCls(host=self.host, port=self.port, conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout, timeout=self.timeout.connect_timeout,
**extra_params) strict=self.strict, **self.conn_kw)
if self.proxy is not None:
# Enable Nagle's algorithm for proxies, to avoid packet
# fragmentation.
conn.tcp_nodelay = 0
return conn return conn
def _get_conn(self, timeout=None): def _get_conn(self, timeout=None):
@ -199,7 +208,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
try: try:
conn = self.pool.get(block=self.block, timeout=timeout) conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.") raise ClosedPoolError(self, "Pool is closed.")
except Empty: except Empty:
@ -213,6 +222,11 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if conn and is_connection_dropped(conn): if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host) log.info("Resetting dropped connection: %s" % self.host)
conn.close() conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn() return conn or self._new_conn()
@ -232,19 +246,26 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
""" """
try: try:
self.pool.put(conn, block=False) self.pool.put(conn, block=False)
return # Everything is dandy, done. return # Everything is dandy, done.
except AttributeError: except AttributeError:
# self.pool is None. # self.pool is None.
pass pass
except Full: except Full:
# This should never happen if self.block == True # This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s" log.warning(
% self.host) "Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it. # Connection never got put back into the pool, close it.
if conn: if conn:
conn.close() conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _get_timeout(self, timeout): def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """ """ Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default: if timeout is _Default:
@ -276,23 +297,21 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
self.num_requests += 1 self.num_requests += 1
timeout_obj = self._get_timeout(timeout) timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
try: # Trigger any extra validation we need to do.
timeout_obj.start_connect() self._validate_conn(conn)
conn.timeout = timeout_obj.connect_timeout
# conn.request() calls httplib.*.request, not the method in # conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket. # urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw) conn.request(method, url, **httplib_request_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, timeout_obj.connect_timeout))
# Reset the timeout for the recv() on the socket # Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr # App Engine doesn't have a sock attr
if hasattr(conn, 'sock'): if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you # In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which # try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching # instead raises a BadStatusLine exception. Instead of catching
@ -300,18 +319,17 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# timeouts, check for a zero timeout before making the request. # timeouts, check for a zero timeout before making the request.
if read_timeout == 0: if read_timeout == 0:
raise ReadTimeoutError( raise ReadTimeoutError(
self, url, self, url, "Read timed out. (read timeout=%s)" % read_timeout)
"Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT: if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout()) conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value else: # None or a value
conn.sock.settimeout(read_timeout) conn.sock.settimeout(read_timeout)
# Receive the response from the server # Receive the response from the server
try: try:
try: # Python 2.7+, use buffering of HTTP responses try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True) httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse() httplib_response = conn.getresponse()
except SocketTimeout: except SocketTimeout:
raise ReadTimeoutError( raise ReadTimeoutError(
@ -323,17 +341,17 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# http://bugs.python.org/issue10272 # http://bugs.python.org/issue10272
if 'timed out' in str(e) or \ if 'timed out' in str(e) or \
'did not complete (read)' in str(e): # Python 2.6 'did not complete (read)' in str(e): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out.") raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
raise raise
except SocketError as e: # Platform-specific: Python 2 except SocketError as e: # Platform-specific: Python 2
# See the above comment about EAGAIN in Python 3. In Python 2 we # See the above comment about EAGAIN in Python 3. In Python 2 we
# have to specifically catch it and throw the timeout error # have to specifically catch it and throw the timeout error
if e.errno in _blocking_errnos: if e.errno in _blocking_errnos:
raise ReadTimeoutError( raise ReadTimeoutError(
self, url, self, url, "Read timed out. (read timeout=%s)" % read_timeout)
"Read timed out. (read timeout=%s)" % read_timeout)
raise raise
@ -358,7 +376,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
conn.close() conn.close()
except Empty: except Empty:
pass # Done. pass # Done.
def is_same_host(self, url): def is_same_host(self, url):
""" """
@ -379,7 +397,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
return (scheme, host, port) == (self.scheme, self.host, self.port) return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3, def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default, redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw): pool_timeout=None, release_conn=None, **response_kw):
""" """
@ -413,11 +431,25 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
these headers completely replace any pool-specific headers. these headers completely replace any pool-specific headers.
:param retries: :param retries:
Number of retries to allow before raising a MaxRetryError exception. Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect: :param redirect:
If True, automatically handle redirects (status codes 301, 302, If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. 303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host: :param assert_same_host:
If ``True``, will make sure that the host of the pool requests is If ``True``, will make sure that the host of the pool requests is
@ -451,15 +483,15 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if headers is None: if headers is None:
headers = self.headers headers = self.headers
if retries < 0: if not isinstance(retries, Retry):
raise MaxRetryError(self, url) retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None: if release_conn is None:
release_conn = response_kw.get('preload_content', True) release_conn = response_kw.get('preload_content', True)
# Check host # Check host
if assert_same_host and not self.is_same_host(url): if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries - 1) raise HostChangedError(self, url, retries)
conn = None conn = None
@ -470,11 +502,15 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
headers = headers.copy() headers = headers.copy()
headers.update(self.proxy_headers) headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try: try:
# Request a connection from the queue # Request a connection from the queue.
conn = self._get_conn(timeout=pool_timeout) conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object # Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url, httplib_response = self._make_request(conn, method, url,
timeout=timeout, timeout=timeout,
body=body, headers=headers) body=body, headers=headers)
@ -497,38 +533,35 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
# ``response.read()``) # ``response.read()``)
except Empty: except Empty:
# Timed out by queue # Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.") raise EmptyPoolError(self, "No pool connections are available.")
except BaseSSLError as e: except (BaseSSLError, CertificateError) as e:
# Release connection unconditionally because there is no way to
# close it externally in case of exception.
release_conn = True
raise SSLError(e) raise SSLError(e)
except CertificateError as e: except (TimeoutError, HTTPException, SocketError) as e:
# Name mismatch if conn:
raise SSLError(e) # Discard the connection for these exceptions. It will be
# be replaced during the next _get_conn() call.
conn.close()
conn = None
except TimeoutError as e: stacktrace = sys.exc_info()[2]
# Connection broken, discard. if isinstance(e, SocketError) and self.proxy:
conn = None e = ProxyError('Cannot connect to proxy.', e)
# Save the error off for retry logic. elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e,
_pool=self, _stacktrace=stacktrace)
retries.sleep()
# Keep track of the error for the retry warning.
err = e err = e
if retries == 0:
raise
except (HTTPException, SocketError) as e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
if isinstance(e, SocketError) and self.proxy is not None:
raise ProxyError('Cannot connect to proxy. '
'Socket error: %s.' % e)
else:
raise MaxRetryError(self, url, e)
finally: finally:
if release_conn: if release_conn:
# Put the connection back to be reused. If the connection is # Put the connection back to be reused. If the connection is
@ -538,9 +571,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if not conn: if not conn:
# Try again # Try again
log.warn("Retrying (%d attempts remain) after connection " log.warning("Retrying (%r) after connection "
"broken by '%r': %s" % (retries, err, url)) "broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1, return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout, timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw) release_conn=release_conn, **response_kw)
@ -550,11 +583,31 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods):
if redirect_location: if redirect_location:
if response.status == 303: if response.status == 303:
method = 'GET' method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
log.info("Redirecting %s -> %s" % (url, redirect_location)) log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers, return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host, retries=retries, redirect=redirect,
timeout=timeout, pool_timeout=pool_timeout, assert_same_host=assert_same_host,
release_conn=release_conn, **response_kw) timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
retries = retries.increment(method, url, response=response, _pool=self)
retries.sleep()
log.info("Forced retry: %s" % url)
return self.urlopen(method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response return response
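With ``retries`` accepting a ``Retry`` object, connect errors, read errors, redirects and forced retries each get their own budget. A sketch against the new API; the host is illustrative:

    from urllib3 import PoolManager
    from urllib3.util.retry import Retry

    http = PoolManager()
    r = http.request('GET', 'http://example.com/',
                     retries=Retry(total=3, redirect=2, backoff_factor=0.5))
    print(r.status)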
@ -581,15 +634,17 @@ class HTTPSConnectionPool(HTTPConnectionPool):
ConnectionCls = HTTPSConnection ConnectionCls = HTTPSConnection
def __init__(self, host, port=None, def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None, _proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None, key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None, ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None): assert_hostname=None, assert_fingerprint=None,
**conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, _proxy, _proxy_headers) block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
self.key_file = key_file self.key_file = key_file
self.cert_file = cert_file self.cert_file = cert_file
self.cert_reqs = cert_reqs self.cert_reqs = cert_reqs
@ -619,7 +674,12 @@ class HTTPSConnectionPool(HTTPConnectionPool):
set_tunnel = conn.set_tunnel set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6 except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel set_tunnel = conn._set_tunnel
set_tunnel(self.host, self.port, self.proxy_headers)
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
# Establish tunnel connection early, because otherwise httplib # Establish tunnel connection early, because otherwise httplib
# would improperly set Host: header to proxy's IP:port. # would improperly set Host: header to proxy's IP:port.
conn.connect() conn.connect()
@ -645,20 +705,32 @@ class HTTPSConnectionPool(HTTPConnectionPool):
actual_host = self.proxy.host actual_host = self.proxy.host
actual_port = self.proxy.port actual_port = self.proxy.port
extra_params = {}
if not six.PY3: # Python 2
extra_params['strict'] = self.strict
conn = self.ConnectionCls(host=actual_host, port=actual_port, conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout, timeout=self.timeout.connect_timeout,
**extra_params) strict=self.strict, **self.conn_kw)
if self.proxy is not None:
# Enable Nagle's algorithm for proxies, to avoid packet
# fragmentation.
conn.tcp_nodelay = 0
return self._prepare_conn(conn) return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
"""
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html '
'(This warning will only appear once by default.)'),
InsecureRequestWarning)
"""
 def connection_from_url(url, **kw):
     """
@@ -675,7 +747,7 @@ def connection_from_url(url, **kw):
     :class:`.ConnectionPool`. Useful for specifying things like
     timeout, maxsize, headers, etc.

-    Example: ::
+    Example::

         >>> conn = connection_from_url('http://google.com/')
         >>> r = conn.request('GET', '/')
View file
@@ -1,9 +1,3 @@
-# urllib3/contrib/ntlmpool.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 """
 NTLM authenticating pool, contributed by erikcederstran
View file
@@ -1,4 +1,7 @@
-'''SSL with SNI_-support for Python 2.
+'''SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.

 This needs the following packages installed:

@@ -6,9 +9,15 @@ This needs the following packages installed:
 * ndg-httpsclient (tested with 0.3.2)
 * pyasn1 (tested with 0.1.6)

-To activate it call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3`.
-This can be done in a ``sitecustomize`` module, or at any other time before
-your application begins using ``urllib3``, like this::
+You can install them with the following command:
+
+    pip install pyopenssl ndg-httpsclient pyasn1
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::

     try:
         import urllib3.contrib.pyopenssl
@@ -29,24 +38,26 @@ Module Variables
 ----------------

 :var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
-    Default: ``EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA256
-    EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EDH+aRSA EECDH RC4 !aNULL !eNULL !LOW !3DES
-    !MD5 !EXP !PSK !SRP !DSS'``
+    Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
+    ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``

 .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
 .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)

 '''
-from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
-from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
+try:
+    from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
+    from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
+except SyntaxError as e:
+    raise ImportError(e)
+
 import OpenSSL.SSL
 from pyasn1.codec.der import decoder as der_decoder
 from pyasn1.type import univ, constraint
-from socket import _fileobject
+from socket import _fileobject, timeout
 import ssl
 import select
-from cStringIO import StringIO

 from .. import connection
 from .. import util
@@ -57,11 +68,18 @@ __all__ = ['inject_into_urllib3', 'extract_from_urllib3']
 HAS_SNI = SUBJ_ALT_NAME_SUPPORT

 # Map from urllib3 to PyOpenSSL compatible parameter-values.
-_openssl_versions = {
-    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
-    ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
-    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
-}
+try:
+    _openssl_versions = {
+        ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+        ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
+        ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+    }
+except AttributeError:
+    _openssl_versions = {
+        ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+        ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+    }
 _openssl_verify = {
     ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
     ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
@@ -69,12 +87,22 @@ _openssl_verify = {
                         + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
 }

-# Default SSL/TLS cipher list.
-# Recommendation by https://community.qualys.com/blogs/securitylabs/2013/08/05/
-# configuring-apache-nginx-and-openssl-for-forward-secrecy
-DEFAULT_SSL_CIPHER_LIST = 'EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM ' + \
-    'EECDH+ECDSA+SHA256 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EDH+aRSA ' + \
-    'EECDH RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS'
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM over any AES-CBC for better performance and security,
+# - use 3DES as fallback which is secure but slow,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
+    "ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
+    "!aNULL:!MD5:!DSS"


 orig_util_HAS_SNI = util.HAS_SNI
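For reference, a sketch of how a cipher string in the new colon-separated format is applied with PyOpenSSL (mirroring what the wrapped-socket setup further below does; pyOpenSSL 0.13 per the docstring above):

    import OpenSSL.SSL

    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
    ctx.set_cipher_list("ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:"
                        "ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:"
                        "RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS")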
@@ -137,184 +165,43 @@ def get_subj_alt_name(peer_cert):
     return dns_name
class fileobject(_fileobject):
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
 class WrappedSocket(object):
-    '''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
+    '''API-compatibility wrapper for Python OpenSSL's Connection-class.
+
+    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+    collector of pypy.
+    '''

-    def __init__(self, connection, socket):
+    def __init__(self, connection, socket, suppress_ragged_eofs=True):
         self.connection = connection
         self.socket = socket
+        self.suppress_ragged_eofs = suppress_ragged_eofs
+        self._makefile_refs = 0

     def fileno(self):
         return self.socket.fileno()

     def makefile(self, mode, bufsize=-1):
-        return fileobject(self.connection, mode, bufsize)
+        self._makefile_refs += 1
+        return _fileobject(self, mode, bufsize, close=True)
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd, wd, ed = select.select(
[self.socket], [], [], self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
     def settimeout(self, timeout):
         return self.socket.settimeout(timeout)
@@ -323,7 +210,10 @@ class WrappedSocket(object):
         return self.connection.sendall(data)

     def close(self):
-        return self.connection.shutdown()
+        if self._makefile_refs < 1:
+            return self.connection.shutdown()
+        else:
+            self._makefile_refs -= 1
     def getpeercert(self, binary_form=False):
         x509 = self.connection.get_peer_certificate()
@@ -346,6 +236,15 @@ class WrappedSocket(object):
             ]
         }
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
 def _verify_callback(cnx, x509, err_no, err_depth, return_code):
     return err_no == 0
@@ -366,6 +265,8 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
             ctx.load_verify_locations(ca_certs, None)
         except OpenSSL.SSL.Error as e:
             raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
+    else:
+        ctx.set_default_verify_paths()

     # Disable TLS compression to migitate CRIME attack (issue #309)
     OP_NO_COMPRESSION = 0x20000
View file
@@ -1,9 +1,3 @@
-# urllib3/exceptions.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-

 ## Base Exceptions

@@ -11,6 +5,11 @@ class HTTPError(Exception):
     "Base exception used by this module."
     pass

+class HTTPWarning(Warning):
+    "Base warning used by this module."
+    pass
+

 class PoolError(HTTPError):
     "Base exception for errors caused within a pool."
@@ -49,17 +48,33 @@ class DecodeError(HTTPError):
     pass

+class ProtocolError(HTTPError):
+    "Raised when something unexpected happens mid-request/response."
+    pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+

 ## Leaf Exceptions

 class MaxRetryError(RequestError):
-    "Raised when the maximum number of retries is exceeded."
+    """Raised when the maximum number of retries is exceeded.
+
+    :param pool: The connection pool
+    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested Url
+    :param exceptions.Exception reason: The underlying error
+
+    """

     def __init__(self, pool, url, reason=None):
         self.reason = reason

         message = "Max retries exceeded with url: %s" % url
         if reason:
-            message += " (Caused by %s: %s)" % (type(reason), reason)
+            message += " (Caused by %r)" % reason
         else:
             message += " (Caused by redirect)"
@@ -111,7 +126,12 @@ class ClosedPoolError(PoolError):
     pass

-class LocationParseError(ValueError, HTTPError):
+class LocationValueError(ValueError, HTTPError):
+    "Raised when there is something wrong with a given URL input."
+    pass
+
+
+class LocationParseError(LocationValueError):
     "Raised when get_host or similar fails to parse the URL input."

     def __init__(self, location):
@@ -119,3 +139,18 @@ class LocationParseError(ValueError, HTTPError):
         HTTPError.__init__(self, message)

         self.location = location
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
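A sketch of the new exception taxonomy in use; the dead local port below merely forces a connection error:

    import urllib3
    from urllib3.exceptions import MaxRetryError, ProtocolError

    http = urllib3.PoolManager()
    try:
        http.request('GET', 'http://localhost:1/', retries=1)
    except MaxRetryError as e:
        print('gave up:', e.reason)
    except ProtocolError:
        # The ConnectionError alias is kept for backwards compatibility.
        print('connection broken mid-request')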
View file
@@ -1,9 +1,3 @@
-# urllib3/fields.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import email.utils
 import mimetypes
@@ -15,7 +9,7 @@ def guess_content_type(filename, default='application/octet-stream'):
     Guess the "Content-Type" of a file.

     :param filename:
-        The filename to guess the "Content-Type" of using :mod:`mimetimes`.
+        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
     :param default:
         If no "Content-Type" can be guessed, default to `default`.
     """
@@ -78,9 +72,10 @@ class RequestField(object):
     """
     A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

-    Supports constructing :class:`~urllib3.fields.RequestField` from parameter
-    of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type)
-    tuple where the MIME type is optional. For example: ::
+    Supports constructing :class:`~urllib3.fields.RequestField` from
+    parameter of key/value strings AND key/filetuple. A filetuple is a
+    (filename, data, MIME type) tuple where the MIME type is optional.
+    For example::

         'foo': 'bar',
         'fakefile': ('foofile.txt', 'contents of foofile'),
@@ -125,8 +120,8 @@ class RequestField(object):
         'Content-Disposition' fields.

         :param header_parts:
-            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format as
-            `k1="v1"; k2="v2"; ...`.
+            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
+            as `k1="v1"; k2="v2"; ...`.
         """
         parts = []
         iterable = header_parts
@@ -158,7 +153,8 @@ class RequestField(object):
         lines.append('\r\n')
         return '\r\n'.join(lines)

-    def make_multipart(self, content_disposition=None, content_type=None, content_location=None):
+    def make_multipart(self, content_disposition=None, content_type=None,
+                       content_location=None):
         """
         Makes this request field into a multipart request field.
@@ -172,6 +168,10 @@ class RequestField(object):
         """
         self.headers['Content-Disposition'] = content_disposition or 'form-data'
-        self.headers['Content-Disposition'] += '; '.join(['', self._render_parts((('name', self._name), ('filename', self._filename)))])
+        self.headers['Content-Disposition'] += '; '.join([
+            '', self._render_parts(
+                (('name', self._name), ('filename', self._filename))
+            )
+        ])
         self.headers['Content-Type'] = content_type
         self.headers['Content-Location'] = content_location
View file
@@ -1,11 +1,4 @@
-# urllib3/filepost.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import codecs
-import mimetypes

 from uuid import uuid4
 from io import BytesIO
@@ -38,10 +31,10 @@ def iter_field_objects(fields):
     i = iter(fields)

     for field in i:
         if isinstance(field, RequestField):
             yield field
         else:
             yield RequestField.from_tuples(*field)


 def iter_fields(fields):
View file
@@ -2,7 +2,6 @@
 # Passes Python2.7's test suite and incorporates all the latest updates.
 # Copyright 2009 Raymond Hettinger, released under the MIT License.
 # http://code.activestate.com/recipes/576693/
-
 try:
     from thread import get_ident as _get_ident
 except ImportError:
View file
@@ -7,7 +7,7 @@ except ImportError:
         from backports.ssl_match_hostname import CertificateError, match_hostname
     except ImportError:
         # Our vendored copy
-        from _implementation import CertificateError, match_hostname
+        from ._implementation import CertificateError, match_hostname

 # Not needed, but documenting what we provide.
 __all__ = ('CertificateError', 'match_hostname')
View file
@@ -1,9 +1,3 @@
-# urllib3/poolmanager.py
-# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 import logging

 try:  # Python 3
@@ -14,8 +8,10 @@ except ImportError:
 from ._collections import RecentlyUsedContainer
 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
 from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError
 from .request import RequestMethods
-from .util import parse_url
+from .util.url import parse_url
+from .util.retry import Retry

 __all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
@@ -49,7 +45,7 @@ class PoolManager(RequestMethods):
     Additional parameters are used to create fresh
     :class:`urllib3.connectionpool.ConnectionPool` instances.

-    Example: ::
+    Example::

         >>> manager = PoolManager(num_pools=2)
         >>> r = manager.request('GET', 'http://google.com/')
@@ -102,10 +98,11 @@ class PoolManager(RequestMethods):
         ``urllib3.connectionpool.port_by_scheme``.
         """
+        if not host:
+            raise LocationValueError("No host specified.")

         scheme = scheme or 'http'
         port = port or port_by_scheme.get(scheme, 80)
         pool_key = (scheme, host, port)

         with self.pools.lock:
@@ -118,6 +115,7 @@ class PoolManager(RequestMethods):
             # Make a fresh ConnectionPool of the desired type
             pool = self._new_pool(scheme, host, port)
             self.pools[pool_key] = pool
+
         return pool
     def connection_from_url(self, url):
@@ -161,13 +159,18 @@ class PoolManager(RequestMethods):
             # Support relative URLs for redirecting.
             redirect_location = urljoin(url, redirect_location)

-        # RFC 2616, Section 10.3.4
+        # RFC 7231, Section 6.4.4
         if response.status == 303:
             method = 'GET'

-        log.info("Redirecting %s -> %s" % (url, redirect_location))
-        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
+        retries = kw.get('retries')
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(retries, redirect=redirect)
+
+        kw['retries'] = retries.increment(method, redirect_location)
         kw['redirect'] = redirect

+        log.info("Redirecting %s -> %s" % (url, redirect_location))
         return self.urlopen(method, redirect_location, **kw)
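What the normalization above buys: an integer (or None) retries value is promoted to a Retry object whose redirect budget is consumed per hop. A sketch (URL illustrative):

    from urllib3.util.retry import Retry

    retries = Retry.from_int(3, redirect=True)
    followed = retries.increment('GET', 'http://example.com/next')
    print(retries.total, followed.total)  # 3 2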
@@ -208,12 +211,16 @@ class ProxyManager(PoolManager):
         if not proxy.port:
             port = port_by_scheme.get(proxy.scheme, 80)
             proxy = proxy._replace(port=port)
+
+        assert proxy.scheme in ("http", "https"), \
+            'Not supported proxy scheme %s' % proxy.scheme
+
         self.proxy = proxy
         self.proxy_headers = proxy_headers or {}
-        assert self.proxy.scheme in ("http", "https"), \
-            'Not supported proxy scheme %s' % self.proxy.scheme
+
         connection_pool_kw['_proxy'] = self.proxy
         connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
         super(ProxyManager, self).__init__(
             num_pools, headers, **connection_pool_kw)
@@ -248,10 +255,10 @@ class ProxyManager(PoolManager):
             # For proxied HTTPS requests, httplib sets the necessary headers
             # on the CONNECT to the proxy. For HTTP, we'll definitely
             # need to set 'Host' at the very least.
-            kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
-                                                                self.headers))
+            headers = kw.get('headers', self.headers)
+            kw['headers'] = self._set_proxy_headers(url, headers)

-        return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
+        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)


 def proxy_from_url(url, **kw):
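ProxyManager after this change, sketched; the proxy address is an example:

    import urllib3

    proxy = urllib3.proxy_from_url('http://localhost:3128/')
    r = proxy.request('GET', 'http://example.com/')
    print(r.status)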
View file
@@ -1,9 +1,3 @@
-# urllib3/request.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
 try:
     from urllib.parse import urlencode
 except ImportError:
@@ -26,8 +20,8 @@ class RequestMethods(object):

     Specifically,

-    :meth:`.request_encode_url` is for sending requests whose fields are encoded
-    in the URL (such as GET, HEAD, DELETE).
+    :meth:`.request_encode_url` is for sending requests whose fields are
+    encoded in the URL (such as GET, HEAD, DELETE).

     :meth:`.request_encode_body` is for sending requests whose fields are
     encoded in the *body* of the request using multipart or www-form-urlencoded
@@ -51,7 +45,7 @@ class RequestMethods(object):

     def urlopen(self, method, url, body=None, headers=None,
                 encode_multipart=True, multipart_boundary=None,
                 **kw):  # Abstract
         raise NotImplemented("Classes extending RequestMethods must implement "
                              "their own ``urlopen`` method.")
@@ -61,8 +55,8 @@ class RequestMethods(object):
         ``fields`` based on the ``method`` used.

         This is a convenience method that requires the least amount of manual
-        effort. It can be used in most situations, while still having the option
-        to drop down to more specific methods when necessary, such as
+        effort. It can be used in most situations, while still having the
+        option to drop down to more specific methods when necessary, such as
         :meth:`request_encode_url`, :meth:`request_encode_body`,
         or even the lowest level :meth:`urlopen`.
         """
@@ -70,12 +64,12 @@ class RequestMethods(object):
         if method in self._encode_url_methods:
             return self.request_encode_url(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)
         else:
             return self.request_encode_body(method, url, fields=fields,
                                             headers=headers,
                                             **urlopen_kw)

     def request_encode_url(self, method, url, fields=None, **urlopen_kw):
         """
@@ -94,18 +88,18 @@ class RequestMethods(object):
         the body. This is useful for request methods like POST, PUT, PATCH, etc.

         When ``encode_multipart=True`` (default), then
-        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
-        payload with the appropriate content type. Otherwise
+        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+        the payload with the appropriate content type. Otherwise
         :meth:`urllib.urlencode` is used with the
         'application/x-www-form-urlencoded' content type.

         Multipart encoding must be used when posting files, and it's reasonably
-        safe to use it in other times too. However, it may break request signing,
-        such as with OAuth.
+        safe to use it in other times too. However, it may break request
+        signing, such as with OAuth.

         Supports an optional ``fields`` parameter of key/value strings AND
         key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
-        the MIME type is optional. For example: ::
+        the MIME type is optional. For example::

             fields = {
                 'foo': 'bar',
@@ -119,17 +113,17 @@ class RequestMethods(object):
         When uploading a file, providing a filename (the first parameter of the
         tuple) is optional but recommended to best mimick behavior of browsers.

-        Note that if ``headers`` are supplied, the 'Content-Type' header will be
-        overwritten because it depends on the dynamic random boundary string
+        Note that if ``headers`` are supplied, the 'Content-Type' header will
+        be overwritten because it depends on the dynamic random boundary string
         which is used to compose the body of the request. The random boundary
         string can be explicitly set with the ``multipart_boundary`` parameter.
         """
         if encode_multipart:
-            body, content_type = encode_multipart_formdata(fields or {},
-                                                           boundary=multipart_boundary)
+            body, content_type = encode_multipart_formdata(
+                fields or {}, boundary=multipart_boundary)
         else:
             body, content_type = (urlencode(fields or {}),
                                   'application/x-www-form-urlencoded')

         if headers is None:
             headers = self.headers
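The fields/filetuple convention documented above, sketched end to end (endpoint illustrative):

    import urllib3

    http = urllib3.PoolManager()
    r = http.request_encode_body(
        'POST', 'http://httpbin.org/post',
        fields={
            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
        })
    print(r.status)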
View file
@@ -1,21 +1,14 @@
-# urllib3/response.py
-# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
-#
-# This module is part of urllib3 and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-import logging
 import zlib
 import io
+from socket import timeout as SocketTimeout

-from .exceptions import DecodeError
+from ._collections import HTTPHeaderDict
+from .exceptions import ProtocolError, DecodeError, ReadTimeoutError
 from .packages.six import string_types as basestring, binary_type
-from .util import is_fp_closed
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed

-log = logging.getLogger(__name__)
 class DeflateDecoder(object):
@@ -55,7 +48,10 @@ class HTTPResponse(io.IOBase):
     HTTP Response container.

     Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
-    loaded and decoded on-demand when the ``data`` property is accessed.
+    loaded and decoded on-demand when the ``data`` property is accessed. This
+    class is also compatible with the Python standard library's :mod:`io`
+    module, and can hence be treated as a readable object in the context of that
+    framework.

     Extra parameters for behaviour not present in httplib.HTTPResponse:
@@ -79,7 +75,10 @@ class HTTPResponse(io.IOBase):
     def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                  strict=0, preload_content=True, decode_content=True,
                  original_response=None, pool=None, connection=None):
-        self.headers = headers or {}
+
+        self.headers = HTTPHeaderDict()
+        if headers:
+            self.headers.update(headers)
         self.status = status
         self.version = version
         self.reason = reason
@@ -87,11 +86,14 @@ class HTTPResponse(io.IOBase):
         self.decode_content = decode_content

         self._decoder = None
-        self._body = body if body and isinstance(body, basestring) else None
+        self._body = None
         self._fp = None
         self._original_response = original_response
         self._fp_bytes_read = 0

+        if body and isinstance(body, (basestring, binary_type)):
+            self._body = body
+
         self._pool = pool
         self._connection = connection
@@ -159,8 +161,8 @@ class HTTPResponse(io.IOBase):
             after having ``.read()`` the file object. (Overridden if ``amt`` is
             set.)
         """
-        # Note: content-encoding value should be case-insensitive, per RFC 2616
-        # Section 3.5
+        # Note: content-encoding value should be case-insensitive, per RFC 7230
+        # Section 3.2
         content_encoding = self.headers.get('content-encoding', '').lower()
         if self._decoder is None:
             if content_encoding in self.CONTENT_DECODERS:
@@ -174,23 +176,42 @@ class HTTPResponse(io.IOBase):
         flush_decoder = False

         try:
-            if amt is None:
-                # cStringIO doesn't like amt=None
-                data = self._fp.read()
-                flush_decoder = True
-            else:
-                cache_content = False
-                data = self._fp.read(amt)
-                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
-                    # Close the connection when no data is returned
-                    #
-                    # This is redundant to what httplib/http.client _should_
-                    # already do. However, versions of python released before
-                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
-                    # properly close the connection in all cases. There is no harm
-                    # in redundantly calling close.
-                    self._fp.close()
-                    flush_decoder = True
+            try:
+                if amt is None:
+                    # cStringIO doesn't like amt=None
+                    data = self._fp.read()
+                    flush_decoder = True
+                else:
+                    cache_content = False
+                    data = self._fp.read(amt)
+                    if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
+                        # Close the connection when no data is returned
+                        #
+                        # This is redundant to what httplib/http.client _should_
+                        # already do. However, versions of python released before
+                        # December 15, 2012 (http://bugs.python.org/issue16298) do
+                        # not properly close the connection in all cases. There is
+                        # no harm in redundantly calling close.
+                        self._fp.close()
+                        flush_decoder = True
+
+            except SocketTimeout:
+                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+                # there is yet no clean way to get at it from this context.
+                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+            except BaseSSLError as e:
+                # FIXME: Is there a better way to differentiate between SSLErrors?
+                if not 'read operation timed out' in str(e):  # Defensive:
+                    # This shouldn't happen but just in case we're missing an edge
+                    # case, let's avoid swallowing SSL errors.
+                    raise
+
+                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+            except HTTPException as e:
+                # This includes IncompleteRead.
+                raise ProtocolError('Connection broken: %r' % e, e)

             self._fp_bytes_read += len(data)
@@ -200,8 +221,7 @@ class HTTPResponse(io.IOBase):
         except (IOError, zlib.error) as e:
             raise DecodeError(
                 "Received response with content-encoding: %s, but "
-                "failed to decode it." % content_encoding,
-                e)
+                "failed to decode it." % content_encoding, e)

         if flush_decoder and decode_content and self._decoder:
             buf = self._decoder.decompress(binary_type())
@@ -238,7 +258,6 @@ class HTTPResponse(io.IOBase):
             if data:
                 yield data

-
     @classmethod
     def from_httplib(ResponseCls, r, **response_kw):
         """
@@ -249,17 +268,9 @@ class HTTPResponse(io.IOBase):
         with ``original_response=r``.
         """

-        # Normalize headers between different versions of Python
-        headers = {}
+        headers = HTTPHeaderDict()
         for k, v in r.getheaders():
-            # Python 3: Header keys are returned capitalised
-            k = k.lower()
-
-            has_value = headers.get(k)
-            if has_value:  # Python 3: Repeating header keys are unmerged.
-                v = ', '.join([has_value, v])
-
-            headers[k] = v
+            headers.add(k, v)

         # HTTPResponse objects in Python 3 don't have a .strict attribute
         strict = getattr(r, 'strict', 0)
@@ -301,7 +312,7 @@ class HTTPResponse(io.IOBase):
         elif hasattr(self._fp, "fileno"):
             return self._fp.fileno()
         else:
             raise IOError("The file-like object this HTTPResponse is wrapped "
                           "around has no file descriptor")

     def flush(self):
@@ -309,4 +320,14 @@ class HTTPResponse(io.IOBase):
             return self._fp.flush()

     def readable(self):
+        # This method is required for `io` module compatibility.
         return True
+
+    def readinto(self, b):
+        # This method is required for `io` module compatibility.
+        temp = self.read(len(b))
+        if len(temp) == 0:
+            return 0
+        else:
+            b[:len(temp)] = temp
+            return len(temp)
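A sketch of the io compatibility the two methods above provide (URL illustrative; preload_content=False keeps the body streaming):

    import io
    import urllib3

    http = urllib3.PoolManager()
    r = http.request('GET', 'http://httpbin.org/bytes/16',
                     preload_content=False)
    reader = io.BufferedReader(r, 8)  # valid because readable()/readinto() exist
    print(len(reader.read()))         # 16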
View file
@@ -1,648 +0,0 @@
# urllib3/util.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from binascii import hexlify, unhexlify
from collections import namedtuple
from hashlib import md5, sha1
from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT
import time
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError, TimeoutStateError
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
def current_time():
"""
Retrieve the current time, this function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
"""
Utility object for storing timeout values.
Example usage:
.. code-block:: python
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
pool.request(...) # Etc, etc
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response. Specifically, Python's DNS resolver does not obey the
timeout specified on the socket. Other factors that can affect total
request time include high CPU load, high swap, the program running at a
low priority level, or other behaviors. The observed running time for
urllib3 to return a response may be greater than the value passed to
`total`.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not ever trigger, even though the request will
take several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is used
for clear error messages
:return: the value
:raises ValueError: if the type is not an integer or a float, or if it
is a numeric value less than zero
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value passed
to this function.
:param timeout: The legacy timeout value
:type timeout: integer, float, sentinel default object, or None
:return: a Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: the elapsed time
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: the connect timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: the value to use for the read timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# in case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
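Although this file is deleted, the Timeout class itself survives the move into the new urllib3/util/ package (re-exported below). Its use, per the docstring above, sketched:

    import urllib3
    from urllib3.util.timeout import Timeout

    http = urllib3.PoolManager(timeout=Timeout(connect=2.0, read=7.0))
    r = http.request('GET', 'http://example.com/')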
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
slots = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementations does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
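parse_url likewise moves to urllib3/util/url.py; a sketch of the Url fields it yields:

    from urllib3.util.url import parse_url

    u = parse_url('http://user:pw@example.com:8080/path?q=1#frag')
    print(u.scheme, u.auth, u.host, u.port)  # http user:pw example.com 8080
    print(u.request_uri)                     # /path?q=1
    print(u.netloc)                          # example.com:8080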
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(six.b(proxy_basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbrevation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
like resolve_cert_reqs
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, rest = divmod(len(fingerprint), 2)
if rest or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
# We need encode() here for py32; works on py2 and p33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
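A sketch of producing the hexdigest that assert_fingerprint() compares against (host illustrative; requires network access):

    import ssl
    from hashlib import sha1

    pem = ssl.get_server_certificate(('example.com', 443))
    der = ssl.PEM_cert_to_DER_cert(pem)
    print(sha1(der).hexdigest())  # pin this value; colons are optional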
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
if hasattr(obj, 'fp'):
# Object is a container for another file-like object that gets released
# on exhaustion (e.g. HTTPResponse)
return obj.fp is None
return obj.closed
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
# Disable TLS compression to migitate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
context.options |= OP_NO_COMPRESSION
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
View file
@@ -0,0 +1,24 @@
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)

View file

@ -0,0 +1,97 @@
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
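# A sketch of where this check fits: deciding whether an idle keep-alive
# connection can be reused (hypothetical host; Python 2 httplib to match
# this vendored code):
import httplib
conn = httplib.HTTPConnection('example.org')
conn.request('GET', '/')
conn.getresponse().read()
if is_connection_dropped(conn):  # peer closed, or stray buffered data
    conn.close()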
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. The only addition to its signature is `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
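# A sketch of the `socket_options` pass-through, e.g. disabling Nagle's
# algorithm before connect() (the host is a placeholder):
import socket
sock = create_connection(('example.org', 80), timeout=10,
                         socket_options=[(socket.IPPROTO_TCP,
                                          socket.TCP_NODELAY, 1)])
sock.close()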

View file

@ -0,0 +1,71 @@
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
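# One more worked example combining several options (credentials are
# placeholders); note that every generated header key is lowercase:
headers = make_headers(keep_alive=True,
                       accept_encoding=['gzip', 'deflate'],
                       basic_auth='user:secret',
                       disable_cache=True)
# {'connection': 'keep-alive', 'accept-encoding': 'gzip,deflate',
#  'authorization': 'Basic dXNlcjpzZWNyZXQ=', 'cache-control': 'no-cache'}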

View file

@ -0,0 +1,22 @@
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
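# Both probes are easy to exercise; a small sketch with an in-memory file:
import io
buf = io.BytesIO(b'data')
assert is_fp_closed(buf) is False   # first check: obj.closed
buf.close()
assert is_fp_closed(buf) is True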

View file

@ -0,0 +1,279 @@
import time
import logging
from ..exceptions import (
ProtocolError,
ConnectTimeoutError,
ReadTimeoutError,
MaxRetryError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we can't
assume that the server did not process any of it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/response retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries?
"""
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
else:
# FIXME: Nothing changed, scenario doesn't make sense.
_observed_errors += 1
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error)
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
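# A worked example of the backoff formula and the forced-retry check (values
# chosen for illustration): with backoff_factor=0.5 and three observed errors,
# the sleep is 0.5 * 2 ** (3 - 1) = 2.0 seconds, capped at BACKOFF_MAX.
retry = Retry(total=5, backoff_factor=0.5, _observed_errors=3)
assert retry.get_backoff_time() == 2.0
retry = Retry(connect=3, read=2, status_forcelist={500, 503})
assert retry.is_forced_retry('GET', 503)       # GET is whitelisted
assert not retry.is_forced_retry('POST', 503)  # POST is not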

View file

@ -0,0 +1,132 @@
from binascii import hexlify, unhexlify
from hashlib import md5, sha1
from ..exceptions import SSLError
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, odd = divmod(len(fingerprint), 2)
if odd or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
# We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation
(so you can specify `REQUIRED` instead of `CERT_REQUIRED`).
If it's neither `None` nor a string, we assume it is already the numeric
constant which can be passed directly to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
Like :func:`resolve_cert_reqs`, but resolves the ssl protocol version; defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
# Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
context.options |= OP_NO_COMPRESSION
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)

View file

@ -0,0 +1,240 @@
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and cast version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect() and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
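# A sketch of how the two properties interact once a total budget is set
# (the durations are invented for illustration):
t = Timeout(total=10.0, connect=3.0, read=8.0)
t.start_connect()            # begin the connect clock
print(t.connect_timeout)     # min(3.0, 10.0) -> 3.0
# ...after the connect finishes, say about 1s later...
print(t.read_timeout)        # max(0, min(10.0 - elapsed, 8.0)) -> about 8.0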

View file

@ -0,0 +1,171 @@
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with the number of delims; not ideal for a large number of them.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
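# Round-tripping the parser on a representative URL (illustrative values):
url = parse_url('http://user:pw@example.org:8080/a/b?q=1#frag')
print(url.scheme, url.auth, url.host, url.port)  # http user:pw example.org 8080
print(url.request_uri)                           # /a/b?q=1
print(url.netloc)                                # example.org:8080
print(get_host('https://example.org/x'))         # ('https', 'example.org', None)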

View file

@ -12,18 +12,24 @@ import os
 from collections import Mapping
 from datetime import datetime
-from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str
+from .auth import _basic_auth_str
+from .compat import cookielib, OrderedDict, urljoin, urlparse
 from .cookies import (
     cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
 from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
 from .hooks import default_hooks, dispatch_hook
 from .utils import to_key_val_list, default_headers, to_native_string
-from .exceptions import TooManyRedirects, InvalidSchema
+from .exceptions import (
+    TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
+from .packages.urllib3._collections import RecentlyUsedContainer
 from .structures import CaseInsensitiveDict
 from .adapters import HTTPAdapter
-from .utils import requote_uri, get_environ_proxies, get_netrc_auth
+from .utils import (
+    requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
+    get_auth_from_url
+)
 from .status_codes import codes
@ -86,11 +92,21 @@ class SessionRedirectMixin(object):
         """Receives a Response. Returns a generator of Responses."""
         i = 0
+        hist = []  # keep track of history
         while resp.is_redirect:
             prepared_request = req.copy()
-            resp.content  # Consume socket so it can be released
+            if i > 0:
+                # Update history and keep track of redirects.
+                hist.append(resp)
+                new_hist = list(hist)
+                resp.history = new_hist
+            try:
+                resp.content  # Consume socket so it can be released
+            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
+                resp.raw.read(decode_content=False)
             if i >= self.max_redirects:
                 raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
@ -110,7 +126,7 @@ class SessionRedirectMixin(object):
                 parsed = urlparse(url)
                 url = parsed.geturl()
-            # Facilitate non-RFC2616-compliant 'location' headers
+            # Facilitate relative 'location' headers, as allowed by RFC 7231.
             # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
             # Compliant with RFC3986, we percent encode the url.
             if not urlparse(url).netloc:
@ -119,8 +135,11 @@ class SessionRedirectMixin(object):
                 url = requote_uri(url)
             prepared_request.url = to_native_string(url)
+            # Cache the url, unless it redirects to itself.
+            if resp.is_permanent_redirect and req.url != prepared_request.url:
+                self.redirect_cache[req.url] = prepared_request.url
-            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
+            # http://tools.ietf.org/html/rfc7231#section-6.4.4
             if (resp.status_code == codes.see_other and
                     method != 'HEAD'):
                 method = 'GET'
@ -138,7 +157,7 @@ class SessionRedirectMixin(object):
             prepared_request.method = method
             # https://github.com/kennethreitz/requests/issues/1084
-            if resp.status_code not in (codes.temporary, codes.resume):
+            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                 if 'Content-Length' in prepared_request.headers:
                     del prepared_request.headers['Content-Length']
@ -154,22 +173,15 @@ class SessionRedirectMixin(object):
             prepared_request._cookies.update(self.cookies)
             prepared_request.prepare_cookies(prepared_request._cookies)
-            if 'Authorization' in headers:
-                # If we get redirected to a new host, we should strip out any
-                # authentication headers.
-                original_parsed = urlparse(resp.request.url)
-                redirect_parsed = urlparse(url)
-                if (original_parsed.hostname != redirect_parsed.hostname):
-                    del headers['Authorization']
-            # .netrc might have more auth for us.
-            new_auth = get_netrc_auth(url) if self.trust_env else None
-            if new_auth is not None:
-                prepared_request.prepare_auth(new_auth)
+            # Rebuild auth and proxy information.
+            proxies = self.rebuild_proxies(prepared_request, proxies)
+            self.rebuild_auth(prepared_request, resp)
+            # Override the original request.
+            req = prepared_request
             resp = self.send(
-                prepared_request,
+                req,
                 stream=stream,
                 timeout=timeout,
                 verify=verify,
@ -183,6 +195,68 @@ class SessionRedirectMixin(object):
             i += 1
             yield resp
+    def rebuild_auth(self, prepared_request, response):
+        """
+        When being redirected we may want to strip authentication from the
+        request to avoid leaking credentials. This method intelligently removes
+        and reapplies authentication where possible to avoid credential loss.
+        """
+        headers = prepared_request.headers
+        url = prepared_request.url
+        if 'Authorization' in headers:
+            # If we get redirected to a new host, we should strip out any
+            # authentication headers.
+            original_parsed = urlparse(response.request.url)
+            redirect_parsed = urlparse(url)
+            if (original_parsed.hostname != redirect_parsed.hostname):
+                del headers['Authorization']
+        # .netrc might have more auth for us on our new host.
+        new_auth = get_netrc_auth(url) if self.trust_env else None
+        if new_auth is not None:
+            prepared_request.prepare_auth(new_auth)
+        return
+    def rebuild_proxies(self, prepared_request, proxies):
+        """
+        This method re-evaluates the proxy configuration by considering the
+        environment variables. If we are redirected to a URL covered by
+        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+        proxy keys for this URL (in case they were stripped by a previous
+        redirect).
+        This method also replaces the Proxy-Authorization header where
+        necessary.
+        """
+        headers = prepared_request.headers
+        url = prepared_request.url
+        scheme = urlparse(url).scheme
+        new_proxies = proxies.copy() if proxies is not None else {}
+        if self.trust_env and not should_bypass_proxies(url):
+            environ_proxies = get_environ_proxies(url)
+            proxy = environ_proxies.get(scheme)
+            if proxy:
+                new_proxies.setdefault(scheme, environ_proxies[scheme])
+        if 'Proxy-Authorization' in headers:
+            del headers['Proxy-Authorization']
+        try:
+            username, password = get_auth_from_url(new_proxies[scheme])
+        except KeyError:
+            username, password = None, None
+        if username and password:
+            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
+        return new_proxies
 class Session(SessionRedirectMixin):
     """A Requests session.
@ -198,9 +272,10 @@ class Session(SessionRedirectMixin):
     """
     __attrs__ = [
-        'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
-        'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream',
-        'trust_env', 'max_redirects']
+        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
+        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
+        'max_redirects', 'redirect_cache'
+    ]
     def __init__(self):
@ -253,6 +328,9 @@ class Session(SessionRedirectMixin):
         self.mount('https://', HTTPAdapter())
         self.mount('http://', HTTPAdapter())
+        # Only store 1000 redirects to prevent using infinite memory
+        self.redirect_cache = RecentlyUsedContainer(1000)
     def __enter__(self):
         return self
@ -290,6 +368,7 @@ class Session(SessionRedirectMixin):
             url=request.url,
             files=request.files,
             data=request.data,
+            json=request.json,
             headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
             params=merge_setting(request.params, self.params),
             auth=merge_setting(auth, self.auth),
@ -311,7 +390,8 @@ class Session(SessionRedirectMixin):
             hooks=None,
             stream=None,
             verify=None,
-            cert=None):
+            cert=None,
+            json=None):
         """Constructs a :class:`Request <Request>`, prepares it and sends it.
         Returns :class:`Response <Response>` object.
@ -321,17 +401,22 @@ class Session(SessionRedirectMixin):
             string for the :class:`Request`.
         :param data: (optional) Dictionary or bytes to send in the body of the
             :class:`Request`.
+        :param json: (optional) json to send in the body of the
+            :class:`Request`.
         :param headers: (optional) Dictionary of HTTP Headers to send with the
             :class:`Request`.
         :param cookies: (optional) Dict or CookieJar object to send with the
             :class:`Request`.
-        :param files: (optional) Dictionary of 'filename': file-like-objects
+        :param files: (optional) Dictionary of ``'filename': file-like-objects``
             for multipart encoding upload.
         :param auth: (optional) Auth tuple or callable to enable
             Basic/Digest/Custom HTTP Auth.
-        :param timeout: (optional) Float describing the timeout of the
-            request in seconds.
-        :param allow_redirects: (optional) Boolean. Set to True by default.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a (`connect timeout, read
+            timeout <user/advanced.html#timeouts>`_) tuple.
+        :type timeout: float or tuple
+        :param allow_redirects: (optional) Set to True by default.
+        :type allow_redirects: bool
         :param proxies: (optional) Dictionary mapping protocol to the URL of
             the proxy.
         :param stream: (optional) whether to immediately download the response
@ -342,7 +427,7 @@ class Session(SessionRedirectMixin):
             If Tuple, ('cert', 'key') pair.
         """
-        method = builtin_str(method)
+        method = to_native_string(method)
         # Create the Request.
         req = Request(
@ -351,6 +436,7 @@ class Session(SessionRedirectMixin):
             headers = headers,
             files = files,
             data = data or {},
+            json = json,
             params = params or {},
             auth = auth,
             cookies = cookies,
@ -360,36 +446,16 @@ class Session(SessionRedirectMixin):
         proxies = proxies or {}
-        # Gather clues from the surrounding environment.
-        if self.trust_env:
-            # Set environment's proxies.
-            env_proxies = get_environ_proxies(url) or {}
-            for (k, v) in env_proxies.items():
-                proxies.setdefault(k, v)
-            # Look for configuration.
-            if not verify and verify is not False:
-                verify = os.environ.get('REQUESTS_CA_BUNDLE')
-            # Curl compatibility.
-            if not verify and verify is not False:
-                verify = os.environ.get('CURL_CA_BUNDLE')
-        # Merge all the kwargs.
-        proxies = merge_setting(proxies, self.proxies)
-        stream = merge_setting(stream, self.stream)
-        verify = merge_setting(verify, self.verify)
-        cert = merge_setting(cert, self.cert)
+        settings = self.merge_environment_settings(
+            prep.url, proxies, stream, verify, cert
+        )
         # Send the request.
         send_kwargs = {
-            'stream': stream,
             'timeout': timeout,
-            'verify': verify,
-            'cert': cert,
-            'proxies': proxies,
             'allow_redirects': allow_redirects,
         }
+        send_kwargs.update(settings)
         resp = self.send(prep, **send_kwargs)
         return resp
@ -424,15 +490,16 @@ class Session(SessionRedirectMixin):
         kwargs.setdefault('allow_redirects', False)
         return self.request('HEAD', url, **kwargs)
-    def post(self, url, data=None, **kwargs):
+    def post(self, url, data=None, json=None, **kwargs):
         """Sends a POST request. Returns :class:`Response` object.
         :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
-        return self.request('POST', url, data=data, **kwargs)
+        return self.request('POST', url, data=data, json=json, **kwargs)
     def put(self, url, data=None, **kwargs):
         """Sends a PUT request. Returns :class:`Response` object.
@ -477,6 +544,14 @@ class Session(SessionRedirectMixin):
         if not isinstance(request, PreparedRequest):
             raise ValueError('You can only send PreparedRequests.')
+        checked_urls = set()
+        while request.url in self.redirect_cache:
+            checked_urls.add(request.url)
+            new_url = self.redirect_cache.get(request.url)
+            if new_url in checked_urls:
+                break
+            request.url = new_url
         # Set up variables needed for resolve_redirects and dispatching of hooks
         allow_redirects = kwargs.pop('allow_redirects', True)
         stream = kwargs.get('stream')
@ -527,10 +602,37 @@ class Session(SessionRedirectMixin):
             history.insert(0, r)
         # Get the last request made
         r = history.pop()
-        r.history = tuple(history)
+        r.history = history
+        if not stream:
+            r.content
         return r
+    def merge_environment_settings(self, url, proxies, stream, verify, cert):
+        """Check the environment and merge it with some settings."""
+        # Gather clues from the surrounding environment.
+        if self.trust_env:
+            # Set environment's proxies.
+            env_proxies = get_environ_proxies(url) or {}
+            for (k, v) in env_proxies.items():
+                proxies.setdefault(k, v)
+            # Look for requests environment configuration and be compatible
+            # with cURL.
+            if verify is True or verify is None:
+                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
+                          os.environ.get('CURL_CA_BUNDLE'))
+        # Merge all the kwargs.
+        proxies = merge_setting(proxies, self.proxies)
+        stream = merge_setting(stream, self.stream)
+        verify = merge_setting(verify, self.verify)
+        cert = merge_setting(cert, self.cert)
+        return {'verify': verify, 'proxies': proxies, 'stream': stream,
+                'cert': cert}
     def get_adapter(self, url):
         """Returns the appropriate connnection adapter for the given URL."""
         for (prefix, adapter) in self.adapters.items():

View file

@ -30,7 +30,8 @@ _codes = {
     305: ('use_proxy',),
     306: ('switch_proxy',),
     307: ('temporary_redirect', 'temporary_moved', 'temporary'),
-    308: ('resume_incomplete', 'resume'),
+    308: ('permanent_redirect',
+          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0
     # Client Error.
     400: ('bad_request', 'bad'),

View file

@ -8,30 +8,7 @@ Data structures that power Requests.
 """
-import os
 import collections
-from itertools import islice
-class IteratorProxy(object):
-    """docstring for IteratorProxy"""
-    def __init__(self, i):
-        self.i = i
-        # self.i = chain.from_iterable(i)
-    def __iter__(self):
-        return self.i
-    def __len__(self):
-        if hasattr(self.i, '__len__'):
-            return len(self.i)
-        if hasattr(self.i, 'len'):
-            return self.i.len
-        if hasattr(self.i, 'fileno'):
-            return os.fstat(self.i.fileno()).st_size
-    def read(self, n):
-        return "".join(islice(self.i, None, n))
 class CaseInsensitiveDict(collections.MutableMapping):
@ -46,7 +23,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
     case of the last key to be set, and ``iter(instance)``,
     ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
     will contain case-sensitive keys. However, querying and contains
-    testing is case insensitive:
+    testing is case insensitive::
         cid = CaseInsensitiveDict()
         cid['Accept'] = 'application/json'
@ -106,8 +83,7 @@ class CaseInsensitiveDict(collections.MutableMapping):
         return CaseInsensitiveDict(self._store.values())
     def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
+        return str(dict(self.items()))
 class LookupDict(dict):
     """Dictionary lookup object."""

View file

@ -19,15 +19,16 @@ import re
 import sys
 import socket
 import struct
+import warnings
 from . import __version__
 from . import certs
 from .compat import parse_http_list as _parse_list_header
 from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
-                     builtin_str, getproxies, proxy_bypass)
+                     builtin_str, getproxies, proxy_bypass, urlunparse)
 from .cookies import RequestsCookieJar, cookiejar_from_dict
 from .structures import CaseInsensitiveDict
-from .exceptions import MissingSchema, InvalidURL
+from .exceptions import InvalidURL
 _hush_pyflakes = (RequestsCookieJar,)
@ -61,7 +62,7 @@ def super_len(o):
         return os.fstat(fileno).st_size
     if hasattr(o, 'getvalue'):
-        # e.g. BytesIO, cStringIO.StringI
+        # e.g. BytesIO, cStringIO.StringIO
         return len(o.getvalue())
@ -287,6 +288,11 @@ def get_encodings_from_content(content):
     :param content: bytestring to extract encodings from.
     """
+    warnings.warn((
+        'In requests 3.0, get_encodings_from_content will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)
     charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
     pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
@ -351,12 +357,14 @@ def get_unicode_from_response(r):
     Tried:
     1. charset from content-type
-    2. fall back and replace all unicode characters
+    2. every encodings from ``<meta ... charset=XXX>``
+    3. fall back and replace all unicode characters
     """
+    warnings.warn((
+        'In requests 3.0, get_unicode_from_response will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)
     tried_encodings = []
@ -466,9 +474,10 @@ def is_valid_cidr(string_network):
     return True
-def get_environ_proxies(url):
-    """Return a dict of environment proxies."""
+def should_bypass_proxies(url):
+    """
+    Returns whether we should bypass proxies or not.
+    """
     get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
     # First check whether no_proxy is defined. If it is, check that the URL
@ -486,13 +495,13 @@ def get_environ_proxies(url):
         for proxy_ip in no_proxy:
             if is_valid_cidr(proxy_ip):
                 if address_in_network(ip, proxy_ip):
-                    return {}
+                    return True
         else:
             for host in no_proxy:
                 if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                     # The URL does match something in no_proxy, so we don't want
                     # to apply the proxies on this URL.
-                    return {}
+                    return True
     # If the system proxy settings indicate that this URL should be bypassed,
     # don't proxy.
@ -506,12 +515,16 @@ def get_environ_proxies(url):
         bypass = False
     if bypass:
-        return {}
+        return True
-    # If we get here, we either didn't have no_proxy set or we're not going
-    # anywhere that no_proxy applies to, and the system settings don't require
-    # bypassing the proxy for the current URL.
-    return getproxies()
+    return False
+def get_environ_proxies(url):
+    """Return a dict of environment proxies."""
+    if should_bypass_proxies(url):
+        return {}
+    else:
+        return getproxies()
 def default_user_agent(name="python-requests"):
@ -549,7 +562,8 @@ def default_headers():
     return CaseInsensitiveDict({
         'User-Agent': default_user_agent(),
         'Accept-Encoding': ', '.join(('gzip', 'deflate')),
-        'Accept': '*/*'
+        'Accept': '*/*',
+        'Connection': 'keep-alive',
     })
@ -564,7 +578,7 @@ def parse_header_links(value):
     replace_chars = " '\""
-    for val in value.split(","):
+    for val in re.split(", *<", value):
         try:
             url, params = val.split(";", 1)
         except ValueError:
@ -622,13 +636,18 @@ def guess_json_utf(data):
     return None
-def except_on_missing_scheme(url):
-    """Given a URL, raise a MissingSchema exception if the scheme is missing.
-    """
-    scheme, netloc, path, params, query, fragment = urlparse(url)
-    if not scheme:
-        raise MissingSchema('Proxy URLs must have explicit schemes.')
+def prepend_scheme_if_needed(url, new_scheme):
+    '''Given a URL that may or may not have a scheme, prepend the given scheme.
+    Does not replace a present scheme with the one provided as an argument.'''
+    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
+    # urlparse is a finicky beast, and sometimes decides that there isn't a
+    # netloc present. Assume that it's being over-cautious, and switch netloc
+    # and path if urlparse decided there was no netloc.
+    if not netloc:
+        netloc, path = path, netloc
+    return urlunparse((scheme, netloc, path, params, query, fragment))
 def get_auth_from_url(url):
@ -661,3 +680,18 @@ def to_native_string(string, encoding='ascii'):
         out = string.decode(encoding)
     return out
+def urldefragauth(url):
+    """
+    Given a url remove the fragment and the authentication part
+    """
+    scheme, netloc, path, params, query, fragment = urlparse(url)
+    # see func:`prepend_scheme_if_needed`
+    if not netloc:
+        netloc, path = path, netloc
+    netloc = netloc.rsplit('@', 1)[-1]
+    return urlunparse((scheme, netloc, path, params, query, ''))

View file

@ -871,8 +871,10 @@ class Tvdb:
         url = self.config['url_epInfo'] % (sid, language)
         epsEt = self._getetsrc(url, language=language)
+        if 'episode' not in epsEt:
+            return False
-        episodes = epsEt["episode"]
+        episodes = epsEt['episode']
         if not isinstance(episodes, list):
             episodes = [episodes]
@ -936,7 +938,7 @@ class Tvdb:
             # Item is integer, treat as show id
             if key not in self.shows:
                 self._getShowData(key, self.config['language'], True)
-            return self.shows[key]
+            return (None, self.shows[key])[key in self.shows]
         key = str(key).lower()
         self.config['searchterm'] = key

View file

@ -29,7 +29,7 @@ except ImportError:
 import xml.etree.ElementTree as ElementTree
 from lib.dateutil.parser import parse
-from cachecontrol import CacheControl, caches
+from lib.cachecontrol import CacheControl, caches
 from tvrage_ui import BaseUI
 from tvrage_exceptions import (tvrage_error, tvrage_userabort, tvrage_shownotfound,
@ -97,15 +97,12 @@ class ShowContainer(dict):
         #keep only the 100th latest results
         if time.time() - self._lastgc > 20:
-            tbd = self._stack[:-100]
-            i = 0
-            for o in tbd:
+            for o in self._stack[:-100]:
                 del self[o]
-                del self._stack[i]
-                i += 1
-            _lastgc = time.time()
-            del tbd
+            self._stack = self._stack[-100:]
+            self._lastgc = time.time()
         super(ShowContainer, self).__setitem__(key, value)
@ -604,6 +601,8 @@ class TVRage:
         self.config['params_epInfo']['sid'] = sid
         epsEt = self._getetsrc(self.config['url_epInfo'], self.config['params_epInfo'])
+        if 'episodelist' not in epsEt and 'season' not in epsEt['episodelist']:
+            return False
         seasons = epsEt['episodelist']['season']
         if not isinstance(seasons, list):
@ -658,7 +657,7 @@ class TVRage:
             # Item is integer, treat as show id
             if key not in self.shows:
                 self._getShowData(key, True)
-            return self.shows[key]
+            return (None, self.shows[key])[key in self.shows]
         key = key.lower()
         self.config['searchterm'] = key

View file

@ -33,8 +33,8 @@ sys.path.append(os.path.abspath('../lib'))
from sickbeard import providers, metadata, config, webserveInit from sickbeard import providers, metadata, config, webserveInit
from sickbeard.providers.generic import GenericProvider from sickbeard.providers.generic import GenericProvider
from providers import ezrss, tvtorrents, btn, newznab, womble, thepiratebay, torrentleech, kat, iptorrents, \ from providers import ezrss, tvtorrents, btn, newznab, womble, thepiratebay, torrentleech, kat, iptorrents, \
omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, nextgen, speedcd, nyaatorrents, fanzub, torrentbytes, animezb, \ omgwtfnzbs, scc, hdtorrents, torrentday, hdbits, nextgen, speedcd, nyaatorrents, fanzub, torrentbytes, \
freshontv, bitsoup, t411, tokyotoshokan freshontv, bitsoup, tokyotoshokan
from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \ from sickbeard.config import CheckSection, check_setting_int, check_setting_str, check_setting_float, ConfigMigrator, \
naming_ep_type naming_ep_type
from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \ from sickbeard import searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser, \
@@ -346,6 +346,7 @@ ANIDB_PASSWORD = None
 ANIDB_USE_MYLIST = False
 ADBA_CONNECTION = None
 ANIME_SPLIT_HOME = False
+ANIME_TREAT_AS_HDTV = False
 
 USE_SYNOINDEX = False
@@ -504,7 +505,7 @@ def initialize(consoleLogging=True):
         USE_FAILED_DOWNLOADS, DELETE_FAILED, ANON_REDIRECT, LOCALHOST_IP, TMDB_API_KEY, DEBUG, PROXY_SETTING, PROXY_INDEXERS, \
         AUTOPOSTPROCESSER_FREQUENCY, DEFAULT_AUTOPOSTPROCESSER_FREQUENCY, MIN_AUTOPOSTPROCESSER_FREQUENCY, \
         ANIME_DEFAULT, NAMING_ANIME, ANIMESUPPORT, USE_ANIDB, ANIDB_USERNAME, ANIDB_PASSWORD, ANIDB_USE_MYLIST, \
-        ANIME_SPLIT_HOME, SCENE_DEFAULT, BACKLOG_DAYS
+        ANIME_SPLIT_HOME, SCENE_DEFAULT, BACKLOG_DAYS, ANIME_TREAT_AS_HDTV
 
     if __INITIALIZED__:
         return False
@@ -928,6 +929,7 @@ def initialize(consoleLogging=True):
         ANIDB_USE_MYLIST = bool(check_setting_int(CFG, 'ANIDB', 'anidb_use_mylist', 0))
 
         ANIME_SPLIT_HOME = bool(check_setting_int(CFG, 'ANIME', 'anime_split_home', 0))
+        ANIME_TREAT_AS_HDTV = bool(check_setting_int(CFG, 'ANIME', 'anime_treat_as_hdtv', 0))
 
         METADATA_XBMC = check_setting_str(CFG, 'General', 'metadata_xbmc', '0|0|0|0|0|0|0|0|0|0')
         METADATA_XBMC_12PLUS = check_setting_str(CFG, 'General', 'metadata_xbmc_12plus', '0|0|0|0|0|0|0|0|0|0')
@@ -1794,6 +1796,7 @@ def save_config():
     new_config['ANIME'] = {}
     new_config['ANIME']['anime_split_home'] = int(ANIME_SPLIT_HOME)
+    new_config['ANIME']['anime_treat_as_hdtv'] = int(ANIME_TREAT_AS_HDTV)
 
     new_config.write()

View file

@@ -22,6 +22,9 @@ import platform
 import re
 import uuid
 
+import logger
+import sickbeard
+
 INSTANCE_ID = str(uuid.uuid1())
 
 USER_AGENT = ('SickGear/(' + platform.system() + '; ' + platform.release() + '; ' + INSTANCE_ID + ')')
@@ -202,6 +205,10 @@ class Quality:
             return Quality.HDBLURAY
         elif blueRayOptions and fullHD and not hdOptions:
             return Quality.FULLHDBLURAY
+        elif sickbeard.ANIME_TREAT_AS_HDTV:
+            logger.log(u'Treating file: ' + name + ' with "unknown" quality as HDTV per user settings',
+                       logger.DEBUG)
+            return Quality.HDTV
         else:
             return Quality.UNKNOWN
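
Note: this hunk implements the changelog item that treats tag-less anime releases as HDTV instead of "unknown". A minimal sketch of the fallback, with placeholder constants standing in for sickbeard.ANIME_TREAT_AS_HDTV and the Quality values:

    ANIME_TREAT_AS_HDTV = True            # stands in for the sickbeard setting
    HDTV, UNKNOWN = 'HDTV', 'UNKNOWN'     # placeholders for Quality constants

    def classify(name, matched_quality=None):
        if matched_quality is not None:
            return matched_quality        # a tag such as 720p or BluRay matched
        if ANIME_TREAT_AS_HDTV:
            print('Treating file: %s with "unknown" quality as HDTV per user settings' % name)
            return HDTV
        return UNKNOWN

    print(classify('[Group] Show - 01.mkv'))  # HDTV while the option is enabled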

View file

@@ -62,7 +62,7 @@ from sickbeard import encodingKludge as ek
 from sickbeard import notifiers
 from sickbeard import clients
 
-from cachecontrol import CacheControl, caches
+from lib.cachecontrol import CacheControl, caches
 from itertools import izip, cycle
 
 urllib._urlopener = classes.SickBeardURLopener()
@@ -1164,8 +1164,13 @@ def mapIndexersToShow(showObj):
         # for each mapped entry
         for curResult in sqlResults:
-            logger.log(u"Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
-            mapped[int(curResult['mindexer'])] = int(curResult['mindexer_id'])
+            nlist = [i for i in curResult if None is not i]
+            # Check if its mapped with both tvdb and tvrage.
+            if 4 <= len(nlist):
+                logger.log(u"Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
+                mapped[int(curResult['mindexer'])] = int(curResult['mindexer_id'])
+                break
     else:
         sql_l = []
         for indexer in sickbeard.indexerApi().indexers:
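
Note: the new guard only trusts a cached mapping when the row carries at least four non-None values, i.e. both the tvdb and the tvrage side are present. A sketch with hypothetical rows:

    # hypothetical rows: (indexer, indexer_id, mindexer, mindexer_id)
    complete = (1, 73739, 2, 2756)        # both mappings present
    incomplete = (1, 73739, None, None)   # only one side mapped

    for row in (complete, incomplete):
        nlist = [i for i in row if None is not i]
        if 4 <= len(nlist):
            print('using cached mapping: indexer %s -> id %s' % (row[2], row[3]))
        else:
            print('cache incomplete, re-querying the indexers')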
@@ -1423,3 +1428,6 @@ def get_size(start_path='.'):
             total_size += ek.ek(os.path.getsize, fp)
 
     return total_size
+
+
+def remove_article(text=''):
+    return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text)
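
Note: remove_article backs the simplified titler helpers later in this diff (a stray "/" before the anchor has been dropped above; with it the pattern could never match). The negative lookahead A(?!\s+to) keeps titles such as "A to Z" intact. A quick usage sketch:

    import re

    def remove_article(text=''):
        # strip one leading "A", "An" or "The" (case-insensitive)
        return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text)

    print(remove_article('The Wire'))         # Wire
    print(remove_article('An Idiot Abroad'))  # Idiot Abroad
    print(remove_article('A to Z'))           # A to Z (lookahead preserves it)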

View file

@@ -34,10 +34,8 @@ __all__ = ['ezrss',
            'nyaatorrents',
            'fanzub',
            'torrentbytes',
-           'animezb',
            'freshontv',
            'bitsoup',
-           't411',
            'tokyotoshokan',
            ]

View file

@@ -1,155 +0,0 @@
-# Author: Nic Wolfe <nic@wolfeden.ca>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of Sick Beard.
-#
-# Sick Beard is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Sick Beard is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
-
-import urllib
-import datetime
-
-import sickbeard
-import generic
-
-from sickbeard import classes, show_name_helpers, helpers
-from sickbeard import exceptions, logger
-from sickbeard.common import *
-from sickbeard import tvcache
-from lib.dateutil.parser import parse as parseDate
-
-
-class Animezb(generic.NZBProvider):
-
-    def __init__(self):
-        generic.NZBProvider.__init__(self, "Animezb")
-
-        self.supportsBacklog = False
-        self.supportsAbsoluteNumbering = True
-        self.anime_only = True
-        self.enabled = False
-
-        self.cache = AnimezbCache(self)
-
-        self.url = 'https://animezb.com/'
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 'animezb.png'
-
-    def _get_season_search_strings(self, ep_obj):
-        return [x for x in show_name_helpers.makeSceneSeasonSearchString(self.show, ep_obj)]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-        search_string = []
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-            ep_string = '+'.join(
-                [helpers.sanitizeSceneName(show_name).replace('.', '+'), str(ep_obj.scene_absolute_number).zfill(2)])
-            search_string.append(ep_string)
-        return search_string
-
-    def _doSearch(self, search_string, epcount=0, age=0):
-        if self.show and not self.show.is_anime:
-            logger.log(u"" + str(self.show.name) + " is not an anime skiping ...")
-            return []
-
-        params = {
-            "cat": "anime",
-            "q": search_string.encode('utf-8'),
-            "max": "100"
-        }
-
-        search_url = self.url + "rss?" + urllib.urlencode(params)
-        logger.log(u"Search url: " + search_url, logger.DEBUG)
-
-        data = self.cache.getRSSFeed(search_url)
-        if not data:
-            return []
-
-        if 'entries' in data:
-            items = data.entries
-            results = []
-
-            for curItem in items:
-                (title, url) = self._get_title_and_url(curItem)
-
-                if title and url:
-                    results.append(curItem)
-                else:
-                    logger.log(
-                        u"The data returned from the " + self.name + " is incomplete, this result is unusable",
-                        logger.DEBUG)
-
-            return results
-
-        return []
-
-    def findPropers(self, date=None):
-        results = []
-
-        for item in self._doSearch("v2 OR v3 OR v4 OR v5"):
-            (title, url) = self._get_title_and_url(item)
-
-            if item.has_key('published_parsed') and item['published_parsed']:
-                result_date = item.published_parsed
-                if result_date:
-                    result_date = datetime.datetime(*result_date[0:6])
-            else:
-                logger.log(u"Unable to figure out the date for entry " + title + ", skipping it")
-                continue
-
-            if not date or result_date > date:
-                search_result = classes.Proper(title, url, result_date, self.show)
-                results.append(search_result)
-
-        return results
-
-
-class AnimezbCache(tvcache.TVCache):
-
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # only poll Animezb every 20 minutes max
-        self.minTime = 20
-
-    def _getRSSData(self):
-        params = {
-            "cat": "anime".encode('utf-8'),
-            "max": "100".encode('utf-8')
-        }
-
-        rss_url = self.provider.url + 'rss?' + urllib.urlencode(params)
-        logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG)
-
-        data = self.getRSSFeed(rss_url)
-
-        if data and 'entries' in data:
-            return data.entries
-        else:
-            return []
-
-provider = Animezb()

View file

@@ -1,295 +0,0 @@
-# -*- coding: latin-1 -*-
-# Author: djoole <bobby.djoole@gmail.com>
-# URL: http://code.google.com/p/sickbeard/
-#
-# This file is part of Sick Beard.
-#
-# Sick Beard is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Sick Beard is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
-
-import traceback
-import time
-import re
-import datetime
-
-import sickbeard
-import generic
-
-from lib import requests
-from sickbeard.common import USER_AGENT, Quality, cpu_presets
-from sickbeard import logger
-from sickbeard import tvcache
-from sickbeard import show_name_helpers
-from sickbeard.bs4_parser import BS4Parser
-from sickbeard import db
-
-
-class T411Provider(generic.TorrentProvider):
-    urls = {'base_url': 'http://www.t411.me/',
-            'search': 'http://www.t411.me/torrents/search/?name=%s&cat=210&subcat=433&search=%s&submit=Recherche',
-            'login_page': 'http://www.t411.me/users/login/',
-            'download': 'http://www.t411.me/torrents/download/?id=%s',
-            }
-
-    def __init__(self):
-        generic.TorrentProvider.__init__(self, "T411")
-
-        self.supportsBacklog = True
-        self.enabled = False
-        self.username = None
-        self.password = None
-        self.ratio = None
-
-        self.cache = T411Cache(self)
-
-        self.url = self.urls['base_url']
-
-        self.last_login_check = None
-        self.login_opener = None
-
-    def isEnabled(self):
-        return self.enabled
-
-    def imageName(self):
-        return 't411.png'
-
-    def getQuality(self, item, anime=False):
-        quality = Quality.sceneQuality(item[0], anime)
-        return quality
-
-    def getLoginParams(self):
-        return {
-            'login': self.username,
-            'password': self.password,
-            'remember': '1',
-        }
-
-    def loginSuccess(self, output):
-        if "<span>Ratio: <strong class" in output.text:
-            return True
-        else:
-            return False
-
-    def _doLogin(self):
-        now = time.time()
-
-        if self.login_opener and self.last_login_check < (now - 3600):
-            try:
-                output = self.login_opener.open(self.urls['test'])
-                if self.loginSuccess(output):
-                    self.last_login_check = now
-                    return True
-                else:
-                    self.login_opener = None
-            except:
-                self.login_opener = None
-
-        if self.login_opener:
-            return True
-
-        try:
-            login_params = self.getLoginParams()
-            self.session = requests.Session()
-            self.session.headers.update({'User-Agent': USER_AGENT})
-            data = self.session.get(self.urls['login_page'], verify=False)
-            output = self.session.post(self.urls['login_page'], data=login_params, verify=False)
-            if self.loginSuccess(output):
-                self.last_login_check = now
-                self.login_opener = self.session
-                return True
-
-            error = 'unknown'
-        except:
-            error = traceback.format_exc()
-            self.login_opener = None
-
-        self.login_opener = None
-        logger.log(u'Failed to login:' + str(error), logger.ERROR)
-        return False
-
-    def _get_season_search_strings(self, ep_obj):
-        search_string = {'Season': []}
-        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-            if ep_obj.show.air_by_date or ep_obj.show.sports:
-                ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
-            elif ep_obj.show.anime:
-                ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
-            else:
-                ep_string = show_name + '.S%02d' % int(ep_obj.scene_season)  #1) showName.SXX
-
-            search_string['Season'].append(ep_string)
-
-        return [search_string]
-
-    def _get_episode_search_strings(self, ep_obj, add_string=''):
-        search_string = {'Episode': []}
-
-        if not ep_obj:
-            return []
-
-        if self.show.air_by_date:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + '.' + \
-                            str(ep_obj.airdate).replace('-', '|')
-                search_string['Episode'].append(ep_string)
-        elif self.show.sports:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + '.' + \
-                            str(ep_obj.airdate).replace('-', '|') + '|' + \
-                            ep_obj.airdate.strftime('%b')
-                search_string['Episode'].append(ep_string)
-        elif self.show.anime:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = sanitizeSceneName(show_name) + '.' + \
-                            "%i" % int(ep_obj.scene_absolute_number)
-                search_string['Episode'].append(ep_string)
-        else:
-            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
-                ep_string = show_name_helpers.sanitizeSceneName(show_name) + '.' + \
-                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
-                                                                  'episodenumber': ep_obj.scene_episode}
-                search_string['Episode'].append(re.sub('\s+', '.', ep_string))
-
-        return [search_string]
-
-    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
-        results = []
-        items = {'Season': [], 'Episode': [], 'RSS': []}
-
-        if not self._doLogin():
-            return []
-
-        for mode in search_params.keys():
-            for search_string in search_params[mode]:
-
-                if search_string == '':
-                    search_string2 = ''
-                else:
-                    search_string2 = '%40name+' + search_string + '+'
-
-                searchURL = self.urls['search'] % (search_string, search_string2)
-                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
-
-                data = self.getURL(searchURL)
-                if not data:
-                    continue
-
-                try:
-                    with BS4Parser(data.decode('iso-8859-1'), features=["html5lib", "permissive"]) as html:
-                        resultsTable = html.find('table', attrs={'class': 'results'})
-
-                        if not resultsTable:
-                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
-                                       logger.DEBUG)
-                            continue
-
-                        entries = resultsTable.find("tbody").findAll("tr")
-
-                        if len(entries) > 0:
-                            for result in entries:
-
-                                try:
-                                    link = result.find('a', title=True)
-                                    torrentName = link['title']
-                                    torrent_name = str(torrentName)
-                                    torrentId = result.find_all('td')[2].find_all('a')[0]['href'][1:].replace('torrents/nfo/?id=', '')
-                                    torrent_download_url = (self.urls['download'] % torrentId).encode('utf8')
-                                except (AttributeError, TypeError):
-                                    continue
-
-                                if not torrent_name or not torrent_download_url:
-                                    continue
-
-                                item = torrent_name, torrent_download_url
-                                logger.log(u"Found result: " + torrent_name + " (" + torrent_download_url + ")", logger.DEBUG)
-                                items[mode].append(item)
-
-                        else:
-                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
-                                       logger.WARNING)
-                            continue
-
-                except Exception, e:
-                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(),
-                               logger.ERROR)
-
-            results += items[mode]
-
-        return results
-
-    def _get_title_and_url(self, item):
-        title, url = item
-
-        if title:
-            title = u'' + title
-            title = title.replace(' ', '.')
-
-        if url:
-            url = str(url).replace('&amp;', '&')
-
-        return title, url
-
-    def findPropers(self, search_date=datetime.datetime.today()):
-        results = []
-
-        myDB = db.DBConnection()
-        sqlResults = myDB.select(
-            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
-            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
-            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
-            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
-            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
-        )
-
-        if not sqlResults:
-            return []
-
-        for sqlshow in sqlResults:
-            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
-            if self.show:
-                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
-                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
-
-                for item in self._doSearch(searchString[0]):
-                    title, url = self._get_title_and_url(item)
-                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
-
-        return results
-
-    def seedRatio(self):
-        return self.ratio
-
-
-class T411Cache(tvcache.TVCache):
-    def __init__(self, provider):
-        tvcache.TVCache.__init__(self, provider)
-
-        # Only poll T411 every 10 minutes max
-        self.minTime = 10
-
-    def _getDailyData(self):
-        search_params = {'RSS': ['']}
-        return self.provider._doSearch(search_params)
-
-
-provider = T411Provider()

View file

@@ -85,7 +85,7 @@ class BacklogSearcher:
         fromDate = datetime.date.fromordinal(1)
 
         if not which_shows and not curDate - self._lastBacklog >= self.cycleTime:
-            logger.log(u"Running limited backlog on missed episodes " + str(sickbeard.BACKLOG_DAYS) + " day(s) and older only")
+            logger.log(u'Running limited backlog for episodes missed during the last %s day(s)' % str(sickbeard.BACKLOG_DAYS))
             fromDate = datetime.date.today() - datetime.timedelta(days=sickbeard.BACKLOG_DAYS)
 
         self.amActive = True

View file

@@ -2494,7 +2494,7 @@ class TVEpisode(object):
             if airs:
                 hr = int(airs.group(1))
                 hr = (12 + hr, hr)[None is airs.group(3)]
-                hr = (hr, hr - 12)[0 == hr % 12]
+                hr = (hr, hr - 12)[0 == hr % 12 and 0 != hr]
                 min = int((airs.group(2), min)[None is airs.group(2)])
                 airtime = datetime.time(hr, min)
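
Note: the airdateModifyStamp fix above stops a "00:00" air time from being rewritten to hour -12 (12 % 12 == 0 is also true for hour 0). A sketch of the conversion chain; the regex here is a hypothetical stand-in for the one tv.py actually uses, with group(3) as the PM marker:

    import datetime
    import re

    def to_airtime(airs_text):
        airs = re.search(r'([0-9]{1,2})(?::([0-9]{2}))?\s*(pm)?', airs_text, re.I)
        hr = int(airs.group(1))
        hr = (12 + hr, hr)[None is airs.group(3)]     # add 12 when a PM marker exists
        hr = (hr, hr - 12)[0 == hr % 12 and 0 != hr]  # 12 PM -> 12, 24 -> 12, but keep hour 0
        mins = int(airs.group(2) or 0)
        return datetime.time(hr, mins)

    print(to_airtime('8:00 PM'))  # 20:00:00
    print(to_airtime('00:00'))    # 00:00:00 (the old test made this hour -12)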

View file

@@ -44,9 +44,7 @@ class CheckVersion():
     def __init__(self):
         self.install_type = self.find_install_type()
 
-        if self.install_type == 'win':
-            self.updater = WindowsUpdateManager()
-        elif self.install_type == 'git':
+        if self.install_type == 'git':
             self.updater = GitUpdateManager()
         elif self.install_type == 'source':
             self.updater = SourceUpdateManager()
@@ -71,15 +69,11 @@ class CheckVersion():
        Determines how this copy of sr was installed.
 
        returns: type of installation. Possible values are:
-            'win': any compiled windows build
            'git': running from source using git
            'source': running from source without git
        """
 
-        # check if we're a windows build
-        if sickbeard.BRANCH.startswith('build '):
-            install_type = 'win'
-        elif os.path.isdir(ek.ek(os.path.join, sickbeard.PROG_DIR, u'.git')):
+        if os.path.isdir(ek.ek(os.path.join, sickbeard.PROG_DIR, u'.git')):
             install_type = 'git'
         else:
             install_type = 'source'
@@ -139,158 +133,6 @@ class UpdateManager():
         return sickbeard.WEB_ROOT + "/home/update/?pid=" + str(sickbeard.PID)
 
-
-class WindowsUpdateManager(UpdateManager):
-    def __init__(self):
-        self.github_repo_user = self.get_github_repo_user()
-        self.github_repo = self.get_github_repo()
-
-        self.branch = sickbeard.BRANCH
-        if sickbeard.BRANCH == '':
-            self.branch = self._find_installed_branch()
-
-        self._cur_version = None
-        self._cur_commit_hash = None
-        self._newest_version = None
-
-        self.gc_url = 'http://code.google.com/p/sickbeard/downloads/list'
-        self.version_url = 'https://raw.github.com/' + self.github_repo_user + '/' + self.github_repo + '/' + self.branch + '/updates.txt'
-
-    def _find_installed_version(self):
-        version = ''
-
-        try:
-            version = sickbeard.BRANCH
-            return int(version[6:])
-        except ValueError:
-            logger.log(u"Unknown SickGear Windows binary release: " + version, logger.ERROR)
-            return None
-
-    def _find_installed_branch(self):
-        return 'windows_binaries'
-
-    def _find_newest_version(self, whole_link=False):
-        """
-        Checks git for the newest Windows binary build. Returns either the
-        build number or the entire build URL depending on whole_link's value.
-
-        whole_link: If True, returns the entire URL to the release. If False, it returns
-                    only the build number. default: False
-        """
-
-        regex = ".*SickGear\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip"
-
-        version_url_data = helpers.getURL(self.version_url)
-        if not version_url_data:
-            return
-
-        for curLine in version_url_data.splitlines():
-            logger.log(u"checking line " + curLine, logger.DEBUG)
-            match = re.match(regex, curLine)
-            if match:
-                logger.log(u"found a match", logger.DEBUG)
-                if whole_link:
-                    return curLine.strip()
-                else:
-                    return int(match.group(1))
-
-    def need_update(self):
-        if self.branch != self._find_installed_branch():
-            logger.log(u"Branch checkout: " + self._find_installed_branch() + "->" + self.branch, logger.DEBUG)
-            return True
-
-        self._cur_version = self._find_installed_version()
-        self._newest_version = self._find_newest_version()
-
-        logger.log(u"newest version: " + repr(self._newest_version), logger.DEBUG)
-        if self._newest_version and self._newest_version > self._cur_version:
-            return True
-
-        return False
-
-    def set_newest_text(self):
-        sickbeard.NEWEST_VERSION_STRING = None
-
-        if not self._cur_version:
-            newest_text = "Unknown SickGear Windows binary version. Not updating with original version."
-        else:
-            newest_text = 'There is a <a href="' + self.gc_url + '" onclick="window.open(this.href); return false;">newer version available</a> (build ' + str(
-                self._newest_version) + ')'
-            newest_text += "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>"
-
-        sickbeard.NEWEST_VERSION_STRING = newest_text
-
-    def update(self):
-        zip_download_url = self._find_newest_version(True)
-        logger.log(u"new_link: " + repr(zip_download_url), logger.DEBUG)
-
-        if not zip_download_url:
-            logger.log(u"Unable to find a new version link on google code, not updating")
-            return False
-
-        try:
-            # prepare the update dir
-            sr_update_dir = ek.ek(os.path.join, sickbeard.PROG_DIR, u'sr-update')
-
-            if os.path.isdir(sr_update_dir):
-                logger.log(u"Clearing out update folder " + sr_update_dir + " before extracting")
-                shutil.rmtree(sr_update_dir)
-
-            logger.log(u"Creating update folder " + sr_update_dir + " before extracting")
-            os.makedirs(sr_update_dir)
-
-            # retrieve file
-            logger.log(u"Downloading update from " + zip_download_url)
-            zip_download_path = os.path.join(sr_update_dir, u'sr-update.zip')
-            urllib.urlretrieve(zip_download_url, zip_download_path)
-
-            if not ek.ek(os.path.isfile, zip_download_path):
-                logger.log(u"Unable to retrieve new version from " + zip_download_url + ", can't update", logger.ERROR)
-                return False
-
-            if not ek.ek(zipfile.is_zipfile, zip_download_path):
-                logger.log(u"Retrieved version from " + zip_download_url + " is corrupt, can't update", logger.ERROR)
-                return False
-
-            # extract to sr-update dir
-            logger.log(u"Unzipping from " + str(zip_download_path) + " to " + sr_update_dir)
-            update_zip = zipfile.ZipFile(zip_download_path, 'r')
-            update_zip.extractall(sr_update_dir)
-            update_zip.close()
-
-            # delete the zip
-            logger.log(u"Deleting zip file from " + str(zip_download_path))
-            os.remove(zip_download_path)
-
-            # find update dir name
-            update_dir_contents = [x for x in os.listdir(sr_update_dir) if
-                                   os.path.isdir(os.path.join(sr_update_dir, x))]
-
-            if len(update_dir_contents) != 1:
-                logger.log(u"Invalid update data, update failed. Maybe try deleting your sr-update folder?",
-                           logger.ERROR)
-                return False
-
-            content_dir = os.path.join(sr_update_dir, update_dir_contents[0])
-            old_update_path = os.path.join(content_dir, u'updater.exe')
-            new_update_path = os.path.join(sickbeard.PROG_DIR, u'updater.exe')
-            logger.log(u"Copying new update.exe file from " + old_update_path + " to " + new_update_path)
-            shutil.move(old_update_path, new_update_path)
-
-            # Notify update successful
-            notifiers.notify_git_update(sickbeard.NEWEST_VERSION_STRING)
-
-        except Exception, e:
-            logger.log(u"Error while trying to update: " + ex(e), logger.ERROR)
-            return False
-
-        return True
-
-    def list_remote_branches(self):
-        return ['windows_binaries']
-
-
 class GitUpdateManager(UpdateManager):
     def __init__(self):
         self._git_path = self._find_working_git()

View file

@@ -37,6 +37,7 @@ from sickbeard import processTV
 from sickbeard import network_timezones, sbdatetime
 from sickbeard.exceptions import ex
 from sickbeard.common import SNATCHED, SNATCHED_PROPER, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN
+from sickbeard.helpers import remove_article
 from common import Quality, qualityPresetStrings, statusStrings
 
 try:
@@ -125,15 +126,7 @@ class Api(webserve.MainHandler):
         t = webserve.PageTemplate(headers=self.request.headers, file="apiBuilder.tmpl")
 
         def titler(x):
-            if not x or sickbeard.SORT_ARTICLE:
-                return x
-            if x.lower().startswith('a '):
-                x = x[2:]
-            elif x.lower().startswith('an '):
-                x = x[3:]
-            elif x.lower().startswith('the '):
-                x = x[4:]
-            return x
+            return (remove_article(x), x)[not x or sickbeard.SORT_ARTICLE]
 
         t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
@@ -725,17 +718,22 @@ class CMD_ComingEpisodes(ApiCall):
     def run(self):
         """ display the coming episodes """
-        today = datetime.date.today().toordinal()
-        next_week = (datetime.date.today() + datetime.timedelta(days=7)).toordinal()
-        recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
+        today_dt = datetime.date.today()
+        today = today_dt.toordinal()
+        yesterday_dt = today_dt - datetime.timedelta(days=1)
+        yesterday = yesterday_dt.toordinal()
+        tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
+        next_week_dt = (datetime.date.today() + datetime.timedelta(days=7))
+        next_week = (next_week_dt + datetime.timedelta(days=1)).toordinal()
+        recently = (yesterday_dt - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
 
         done_show_list = []
         qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
 
-        myDB = db.DBConnection(row_type="dict")
+        myDB = db.DBConnection()
         sql_results = myDB.select(
-            "SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'indexerid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.indexer_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(
-                ['?'] * len(qualList)) + ")", [today, next_week] + qualList)
+            "SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'indexerid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate <= ? AND tv_shows.indexer_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(
+                ['?'] * len(qualList)) + ")", [yesterday, next_week] + qualList)
 
         for cur_result in sql_results:
             done_show_list.append(int(cur_result["indexerid"]))
@@ -748,17 +746,42 @@ class CMD_ComingEpisodes(ApiCall):
         sql_results += more_sql_results
 
         more_sql_results = myDB.select(
-            "SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'indexerid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.indexer_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(
-                ['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList)
+            "SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'indexerid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.indexer_id = tv_episodes.showid AND airdate <= ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(
+                ['?'] * len(qualList)) + ")", [tomorrow, recently, WANTED] + qualList)
         sql_results += more_sql_results
 
-        # sort by air date
+        sql_results = list(set(sql_results))
+
+        # make a dict out of the sql results
+        sql_results = [dict(row) for row in sql_results]
+
+        # multi dimension sort
         sorts = {
-            'date': (lambda x, y: cmp(int(x["airdate"]), int(y["airdate"]))),
-            'show': (lambda a, b: cmp(a["show_name"], b["show_name"])),
-            'network': (lambda a, b: cmp(a["network"], b["network"]))
+            'date': (lambda a, b: cmp(
+                (a['parsed_datetime'],
+                 (a['show_name'], remove_article(a['show_name']))[not sickbeard.SORT_ARTICLE],
+                 a['season'], a['episode']),
+                (b['parsed_datetime'],
+                 (b['show_name'], remove_article(b['show_name']))[not sickbeard.SORT_ARTICLE],
+                 b['season'], b['episode']))),
+            'show': (lambda a, b: cmp(
+                ((a['show_name'], remove_article(a['show_name']))[not sickbeard.SORT_ARTICLE],
+                 a['parsed_datetime'], a['season'], a['episode']),
+                ((b['show_name'], remove_article(b['show_name']))[not sickbeard.SORT_ARTICLE],
+                 b['parsed_datetime'], b['season'], b['episode']))),
+            'network': (lambda a, b: cmp(
+                (a['network'], a['parsed_datetime'],
+                 (a['show_name'], remove_article(a['show_name']))[not sickbeard.SORT_ARTICLE],
+                 a['season'], a['episode']),
+                (b['network'], b['parsed_datetime'],
+                 (b['show_name'], remove_article(b['show_name']))[not sickbeard.SORT_ARTICLE],
+                 b['season'], b['episode'])))
        }
 
+        # add parsed_datetime to the dict
+        for index, item in enumerate(sql_results):
+            sql_results[index]['parsed_datetime'] = network_timezones.parse_date_time(item['airdate'], item['airs'], item['network'])
+
         sql_results.sort(sorts[self.sort])
 
         finalEpResults = {}
@@ -777,9 +800,7 @@ class CMD_ComingEpisodes(ApiCall):
             if ep["paused"] and not self.paused:
                 continue
 
-            ep['airs'] = str(ep['airs']).replace('am', ' AM').replace('pm', ' PM').replace('  ', ' ')
-            dtEpisodeAirs = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(int(ep['airdate']), ep['airs'], ep['network']))
-            ep['airdate'] = dtEpisodeAirs.toordinal()
+            ep['airdate'] = int(ep["airdate"])
 
             status = "soon"
             if ep["airdate"] < today:
@@ -801,12 +822,13 @@ class CMD_ComingEpisodes(ApiCall):
             ep["quality"] = _get_quality_string(ep["quality"])
 
             # clean up tvdb horrible airs field
-            ep['airs'] = sbdatetime.sbdatetime.sbftime(dtEpisodeAirs, t_preset=timeFormat).lstrip('0').replace(' 0', ' ')
+            ep['airs'] = str(ep['airs']).replace('am', ' AM').replace('pm', ' PM').replace('  ', ' ')
 
             # start day of the week on 1 (monday)
-            ep['weekday'] = 1 + datetime.date.fromordinal(dtEpisodeAirs.toordinal()).weekday()
+            ep['weekday'] = 1 + datetime.date.fromordinal(ep['airdate']).weekday()
 
             # Add tvdbid for backward compability
             ep["tvdbid"] = ep['indexerid']
 
-            ep['airdate'] = sbdatetime.sbdatetime.sbfdate(dtEpisodeAirs, d_preset=dateFormat)
+            ep['airdate'] = sbdatetime.sbdatetime.sbfdate(datetime.date.fromordinal(ep['airdate']), d_preset=dateFormat)
+            ep['parsed_datetime'] = sbdatetime.sbdatetime.sbfdatetime(ep['parsed_datetime'], d_preset=dateFormat, t_preset='%H:%M %z')
 
             # TODO: check if this obsolete
             if not status in finalEpResults:
@@ -985,7 +1007,7 @@ class CMD_EpisodeSetStatus(ApiCall):
             with epObj.lock:
                 if self.status == WANTED:
                     # figure out what episodes are wanted so we can backlog them
-                    if epObj.season in ep_segment:
+                    if epObj.season in segments:
                         segments[epObj.season].append(epObj)
                     else:
                         segments[epObj.season] = [epObj]
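
Note: each comparator above builds composite tuples so cmp() falls through the sort keys in order (date, then article-aware show name, then season and episode), with SORT_ARTICLE honoured via the same tuple-index idiom used elsewhere in this release. A Python 2 sketch over hypothetical rows:

    import re

    SORT_ARTICLE = False  # stands in for sickbeard.SORT_ARTICLE

    def remove_article(text=''):
        return re.sub(r'(?i)^(?:(?:A(?!\s+to)n?)|The)\s(\w)', r'\1', text)

    rows = [
        {'parsed_datetime': 2, 'show_name': 'The Wire', 'season': 1, 'episode': 2},
        {'parsed_datetime': 2, 'show_name': 'Archer', 'season': 5, 'episode': 1},
        {'parsed_datetime': 1, 'show_name': 'Lost', 'season': 6, 'episode': 17},
    ]

    rows.sort(lambda a, b: cmp(
        (a['parsed_datetime'],
         (a['show_name'], remove_article(a['show_name']))[not SORT_ARTICLE],
         a['season'], a['episode']),
        (b['parsed_datetime'],
         (b['show_name'], remove_article(b['show_name']))[not SORT_ARTICLE],
         b['season'], b['episode'])))

    print([r['show_name'] for r in rows])  # ['Lost', 'Archer', 'The Wire']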

View file

@@ -52,6 +52,7 @@ from sickbeard.common import Quality, Overview, statusStrings, qualityPresetStri
 from sickbeard.common import SNATCHED, UNAIRED, IGNORED, ARCHIVED, WANTED, FAILED
 from sickbeard.common import SD, HD720p, HD1080p
 from sickbeard.exceptions import ex
+from sickbeard.helpers import remove_article
 from sickbeard.scene_exceptions import get_scene_exceptions
 from sickbeard.scene_numbering import get_scene_numbering, set_scene_numbering, get_scene_numbering_for_show, \
     get_xem_numbering_for_show, get_scene_absolute_numbering_for_show, get_xem_absolute_numbering_for_show, \
@@ -350,42 +351,67 @@ class MainHandler(RequestHandler):
         redirect("/comingEpisodes/")
 
     def comingEpisodes(self, layout="None"):
-        """ display the coming episodes """
-        today1 = datetime.date.today()
-        today = today1.toordinal()
-        next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
-        next_week = next_week1.toordinal()
-        recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
+        today_dt = datetime.date.today()
+        #today = today_dt.toordinal()
+        yesterday_dt = today_dt - datetime.timedelta(days=1)
+        yesterday = yesterday_dt.toordinal()
+        tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
+        next_week_dt = (datetime.date.today() + datetime.timedelta(days=7))
+        next_week = (next_week_dt + datetime.timedelta(days=1)).toordinal()
+
+        if not (layout and layout in ('calendar')) and not (sickbeard.COMING_EPS_LAYOUT and sickbeard.COMING_EPS_LAYOUT in ('calendar')):
+            recently = (yesterday_dt - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
+        else:
+            recently = yesterday
 
         done_show_list = []
         qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
 
         myDB = db.DBConnection()
         sql_results = myDB.select(
-            "SELECT *, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.indexer_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(
-                ['?'] * len(qualList)) + ")", [today, next_week] + qualList)
+            "SELECT *, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate <= ? AND tv_shows.indexer_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(
+                ['?'] * len(qualList)) + ")", [yesterday, next_week] + qualList)
 
         for cur_result in sql_results:
             done_show_list.append(int(cur_result["showid"]))
 
-        more_sql_results = myDB.select(
-            "SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN (" + ','.join(
-                ['?'] * len(
-                    done_show_list)) + ") AND tv_shows.indexer_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season != 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(
-                ['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")",
-            done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
-        sql_results += more_sql_results
+        if not (layout and layout in ('calendar')) and not (sickbeard.COMING_EPS_LAYOUT and sickbeard.COMING_EPS_LAYOUT in ('calendar')):
+            more_sql_results = myDB.select(
+                "SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN (" + ','.join(
+                    ['?'] * len(
+                        done_show_list)) + ") AND tv_shows.indexer_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season != 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(
+                    ['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")",
+                done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
+            sql_results += more_sql_results
 
         more_sql_results = myDB.select(
-            "SELECT *, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.indexer_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(
-                ['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList)
+            "SELECT *, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.indexer_id = tv_episodes.showid AND airdate <= ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(
                ['?'] * len(qualList)) + ")", [tomorrow, recently, WANTED] + qualList)
         sql_results += more_sql_results
 
-        # sort by localtime
+        sql_results = list(set(sql_results))
+
+        # multi dimension sort
         sorts = {
-            'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
-            'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
-            'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
+            'date': (lambda a, b: cmp(
+                (a['localtime'],
+                 (a['show_name'], remove_article(a['show_name']))[not sickbeard.SORT_ARTICLE],
+                 a['season'], a['episode']),
+                (b['localtime'],
+                 (b['show_name'], remove_article(b['show_name']))[not sickbeard.SORT_ARTICLE],
+                 b['season'], b['episode']))),
+            'show': (lambda a, b: cmp(
+                ((a['show_name'], remove_article(a['show_name']))[not sickbeard.SORT_ARTICLE],
+                 a['localtime'], a['season'], a['episode']),
+                ((b['show_name'], remove_article(b['show_name']))[not sickbeard.SORT_ARTICLE],
+                 b['localtime'], b['season'], b['episode']))),
+            'network': (lambda a, b: cmp(
+                (a['network'], a['localtime'],
+                 (a['show_name'], remove_article(a['show_name']))[not sickbeard.SORT_ARTICLE],
+                 a['season'], a['episode']),
+                (b['network'], b['localtime'],
+                 (b['show_name'], remove_article(b['show_name']))[not sickbeard.SORT_ARTICLE],
+                 b['season'], b['episode'])))
        }
 
         # make a dict out of the sql results
@@ -418,8 +444,8 @@ class MainHandler(RequestHandler):
             paused_item,
         ]
 
-        t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=network_timezones.sb_timezone))
-        t.today = datetime.datetime.now().replace(tzinfo=network_timezones.sb_timezone)
+        t.next_week = datetime.datetime.combine(next_week_dt, datetime.time(tzinfo=network_timezones.sb_timezone))
+        t.today = datetime.datetime.now(network_timezones.sb_timezone)
 
         t.sql_results = sql_results
 
         # Allow local overriding of layout parameter
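
Note: both Coming Episodes code paths (API above, web page here) now query from yesterday through the day after next week, matching the changelog item that widens the page by +/- 1 day for time zone corrections. The bounds reduce to:

    import datetime

    today_dt = datetime.date.today()
    yesterday = (today_dt - datetime.timedelta(days=1)).toordinal()   # lower bound
    tomorrow = (today_dt + datetime.timedelta(days=1)).toordinal()    # missed-episode cap
    next_week = (today_dt + datetime.timedelta(days=8)).toordinal()   # 7 days + 1 upper bound

    print(next_week - yesterday)  # 9: the window spans nine ordinal days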
@@ -2642,7 +2668,7 @@ class ConfigAnime(MainHandler):
 
     def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, anidb_use_mylist=None,
-                  split_home=None):
+                  split_home=None, anime_treat_as_hdtv=None):
 
         results = []
@@ -2651,6 +2677,7 @@ class ConfigAnime(MainHandler):
         sickbeard.ANIDB_PASSWORD = anidb_password
         sickbeard.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist)
         sickbeard.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home)
+        sickbeard.ANIME_TREAT_AS_HDTV = config.checkbox_to_value(anime_treat_as_hdtv)
 
         sickbeard.save_config()
@@ -2885,8 +2912,7 @@ class NewHomeAddShows(MainHandler):
         return _munge(t)
 
-    def newShow(self, show_to_add=None, other_shows=None):
+    def newShow(self, show_to_add=None, other_shows=None, use_show_name=None):
         """
         Display the new show page which collects a tvdb id, folder, and extra options and
         posts them to addNewShow
@@ -2905,7 +2931,9 @@ class NewHomeAddShows(MainHandler):
         t.use_provided_info = use_provided_info
 
         # use the given show_dir for the indexer search if available
-        if not show_dir:
+        if use_show_name:
+            t.default_show_name = show_name
+        elif not show_dir:
             t.default_show_name = ''
         elif not show_name:
             t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.', ' ')
@@ -2988,11 +3016,21 @@ class NewHomeAddShows(MainHandler):
         t.submenu = HomeMenu()
 
         t.trending_shows = TraktCall("shows/trending.json/%API%", sickbeard.TRAKT_API_KEY)
+        t.trending_inlibrary = 0
         if None is not t.trending_shows:
             for item in t.trending_shows:
-                if helpers.findCertainShow(sickbeard.showList, int(item['tvdb_id'])):
-                    item['tvdb_id'] = u'ExistsInLibrary'
+                tvdbs = ['tvdb_id', 'tvrage_id']
+                for index, tvdb in enumerate(tvdbs):
+                    try:
+                        item[u'show_id'] = item[tvdb]
+                        tvshow = helpers.findCertainShow(sickbeard.showList, int(item[tvdb]))
+                    except:
+                        continue
+                    # check tvshow indexer is not using the same id from another indexer
+                    if tvshow and (index + 1) == tvshow.indexer:
+                        item[u'show_id'] = u'%s:%s' % (tvshow.indexer, item[tvdb])
+                        t.trending_inlibrary += 1
+                        break
 
         return _munge(t)
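
Note: trending items are now probed against both databases; index + 1 leans on the indexer numbering (assumed here: 1 = TheTVDB, 2 = TVRage) so a matching id from the wrong database is not reported as "in library". A sketch with a hypothetical library map:

    # hypothetical library: show id -> indexer that the show was added from
    library = {73739: 1, 2756: 2}

    item = {'tvdb_id': 73739, 'tvrage_id': 2756}
    for index, key in enumerate(['tvdb_id', 'tvrage_id']):
        indexer = library.get(item[key])
        # accept only when the id matches the indexer the show actually uses
        if indexer and (index + 1) == indexer:
            print('in library via indexer %d (id %s)' % (indexer, item[key]))
            break
    else:
        print('not in library')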
@@ -3008,37 +3046,7 @@ class NewHomeAddShows(MainHandler):
     def addTraktShow(self, indexer_id, showName):
         if helpers.findCertainShow(sickbeard.showList, int(indexer_id)):
             return
-
-        if sickbeard.ROOT_DIRS:
-            root_dirs = sickbeard.ROOT_DIRS.split('|')
-            location = root_dirs[int(root_dirs[0]) + 1]
-        else:
-            location = None
-
-        if location:
-            show_dir = ek.ek(os.path.join, location, helpers.sanitizeFileName(showName))
-            dir_exists = helpers.makeDir(show_dir)
-            if not dir_exists:
-                logger.log(u"Unable to create the folder " + show_dir + ", can't add the show", logger.ERROR)
-                return
-            else:
-                helpers.chmodAsParent(show_dir)
-
-            sickbeard.showQueueScheduler.action.addShow(1, int(indexer_id), show_dir,
-                                                        default_status=sickbeard.STATUS_DEFAULT,
-                                                        quality=sickbeard.QUALITY_DEFAULT,
-                                                        flatten_folders=sickbeard.FLATTEN_FOLDERS_DEFAULT,
-                                                        subtitles=sickbeard.SUBTITLES_DEFAULT,
-                                                        anime=sickbeard.ANIME_DEFAULT,
-                                                        scene=sickbeard.SCENE_DEFAULT)
-
-            ui.notifications.message('Show added', 'Adding the specified show into ' + show_dir)
-        else:
-            logger.log(u"There was an error creating the show, no root directory setting found", logger.ERROR)
-            return
-
-        # done adding show
-        redirect('/home/')
+        return self.newShow('|'.join(['', '', indexer_id, showName]), use_show_name=True)
 
     def addNewShow(self, whichSeries=None, indexerLang="en", rootDir=None, defaultStatus=None,
                    anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None,
@@ -3751,15 +3759,7 @@ class Home(MainHandler):
             epCounts[curEpCat] += 1
 
         def titler(x):
-            if not x or sickbeard.SORT_ARTICLE:
-                return x
-            if x.lower().startswith('a '):
-                x = x[2:]
-            if x.lower().startswith('an '):
-                x = x[3:]
-            elif x.lower().startswith('the '):
-                x = x[4:]
-            return x
+            return (remove_article(x), x)[not x or sickbeard.SORT_ARTICLE]
 
         if sickbeard.ANIME_SPLIT_HOME:
             shows = []