id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
1,300
tvcache.py
midgetspy_Sick-Beard/sickbeard/tvcache.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import time import datetime import sqlite3 import sickbeard from sickbeard import db from sickbeard import logger from sickbeard.common import Quality from sickbeard import helpers, show_name_helpers from sickbeard import name_cache from sickbeard.exceptions import ex, AuthException try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree from lib.tvdb_api import tvdb_api, tvdb_exceptions from name_parser.parser import NameParser, InvalidNameException class CacheDBConnection(db.DBConnection): def __init__(self, providerName): db.DBConnection.__init__(self, "cache.db") # Create the table if it's not already there try: sql = "CREATE TABLE [" + providerName + "] (name TEXT, season NUMERIC, episodes TEXT, tvrid NUMERIC, tvdbid NUMERIC, url TEXT, time NUMERIC, quality TEXT);" self.connection.execute(sql) self.connection.commit() except sqlite3.OperationalError, e: if str(e) != "table [" + providerName + "] already exists": raise # Create the table if it's not already there try: sql = "CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);" self.connection.execute(sql) self.connection.commit() except sqlite3.OperationalError, e: if str(e) != "table lastUpdate already exists": raise class 
TVCache(): def __init__(self, provider): self.provider = provider self.providerID = self.provider.getID() self.minTime = 10 def _getDB(self): return CacheDBConnection(self.providerID) def _clearCache(self): myDB = self._getDB() myDB.action("DELETE FROM [" + self.providerID + "] WHERE 1") def _getRSSData(self): data = None return data def _checkAuth(self, parsedXML): return True def _checkItemAuth(self, title, url): return True def updateCache(self): if not self.shouldUpdate(): return if self._checkAuth(None): data = self._getRSSData() # as long as the http request worked we count this as an update if data: self.setLastUpdate() else: return [] # now that we've loaded the current RSS feed lets delete the old cache logger.log(u"Clearing " + self.provider.name + " cache and updating with new information") self._clearCache() parsedXML = helpers.parse_xml(data) if parsedXML is None: logger.log(u"Error trying to load " + self.provider.name + " RSS feed", logger.ERROR) return [] if self._checkAuth(parsedXML): if parsedXML.tag == 'rss': items = parsedXML.findall('.//item') else: logger.log(u"Resulting XML from " + self.provider.name + " isn't RSS, not parsing it", logger.ERROR) return [] for item in items: self._parseItem(item) else: raise AuthException(u"Your authentication credentials for " + self.provider.name + " are incorrect, check your config") return [] def _translateTitle(self, title): return title.replace(' ', '.') def _translateLinkURL(self, url): return url.replace('&amp;', '&') def _parseItem(self, item): title = helpers.get_xml_text(item.find('title')) url = helpers.get_xml_text(item.find('link')) self._checkItemAuth(title, url) if title and url: title = self._translateTitle(title) url = self._translateLinkURL(url) logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG) self._addCacheEntry(title, url) else: logger.log(u"The XML returned from the " + self.provider.name + " feed is incomplete, this result is unusable", logger.DEBUG) return def 
_getLastUpdate(self): myDB = self._getDB() sqlResults = myDB.select("SELECT time FROM lastUpdate WHERE provider = ?", [self.providerID]) if sqlResults: lastTime = int(sqlResults[0]["time"]) if lastTime > int(time.mktime(datetime.datetime.today().timetuple())): lastTime = 0 else: lastTime = 0 return datetime.datetime.fromtimestamp(lastTime) def setLastUpdate(self, toDate=None): if not toDate: toDate = datetime.datetime.today() myDB = self._getDB() myDB.upsert("lastUpdate", {'time': int(time.mktime(toDate.timetuple()))}, {'provider': self.providerID}) lastUpdate = property(_getLastUpdate) def shouldUpdate(self): # if we've updated recently then skip the update if datetime.datetime.today() - self.lastUpdate < datetime.timedelta(minutes=self.minTime): logger.log(u"Last update was too soon, using old cache: today()-" + str(self.lastUpdate) + "<" + str(datetime.timedelta(minutes=self.minTime)), logger.DEBUG) return False return True def _addCacheEntry(self, name, url, season=None, episodes=None, tvdb_id=0, tvrage_id=0, quality=None, extraNames=[]): myDB = self._getDB() parse_result = None # if we don't have complete info then parse the filename to get it for curName in [name] + extraNames: try: myParser = NameParser() parse_result = myParser.parse(curName) except InvalidNameException: logger.log(u"Unable to parse the filename " + curName + " into a valid episode", logger.DEBUG) continue if not parse_result: logger.log(u"Giving up because I'm unable to parse this name: " + name, logger.DEBUG) return False if not parse_result.series_name: logger.log(u"No series name retrieved from " + name + ", unable to cache it", logger.DEBUG) return False tvdb_lang = None # if we need tvdb_id or tvrage_id then search the DB for them if not tvdb_id or not tvrage_id: # if we have only the tvdb_id, use the database if tvdb_id: showObj = helpers.findCertainShow(sickbeard.showList, tvdb_id) if showObj: tvrage_id = showObj.tvrid tvdb_lang = showObj.lang else: logger.log(u"We were given a TVDB 
id " + str(tvdb_id) + " but it doesn't match a show we have in our list, so leaving tvrage_id empty", logger.DEBUG) tvrage_id = 0 # if we have only a tvrage_id then use the database elif tvrage_id: showObj = helpers.findCertainTVRageShow(sickbeard.showList, tvrage_id) if showObj: tvdb_id = showObj.tvdbid tvdb_lang = showObj.lang else: logger.log(u"We were given a TVRage id " + str(tvrage_id) + " but it doesn't match a show we have in our list, so leaving tvdb_id empty", logger.DEBUG) tvdb_id = 0 # if they're both empty then fill out as much info as possible by searching the show name else: # check the name cache and see if we already know what show this is logger.log(u"Checking the cache to see if we already know the tvdb id of " + parse_result.series_name, logger.DEBUG) tvdb_id = name_cache.retrieveNameFromCache(parse_result.series_name) # remember if the cache lookup worked or not so we know whether we should bother updating it later if tvdb_id == None: logger.log(u"No cache results returned, continuing on with the search", logger.DEBUG) from_cache = False else: logger.log(u"Cache lookup found " + repr(tvdb_id) + ", using that", logger.DEBUG) from_cache = True # if the cache failed, try looking up the show name in the database if tvdb_id == None: logger.log(u"Trying to look the show up in the show database", logger.DEBUG) showResult = helpers.searchDBForShow(parse_result.series_name) if showResult: logger.log(u"" + parse_result.series_name + " was found to be show " + showResult[1] + " (" + str(showResult[0]) + ") in our DB.", logger.DEBUG) tvdb_id = showResult[0] # if the DB lookup fails then do a comprehensive regex search if tvdb_id == None: logger.log(u"Couldn't figure out a show name straight from the DB, trying a regex search instead", logger.DEBUG) for curShow in sickbeard.showList: if show_name_helpers.isGoodResult(name, curShow, False): logger.log(u"Successfully matched " + name + " to " + curShow.name + " with regex", logger.DEBUG) tvdb_id = 
curShow.tvdbid tvdb_lang = curShow.lang break # if tvdb_id was anything but None (0 or a number) then if not from_cache: name_cache.addNameToCache(parse_result.series_name, tvdb_id) # if we came out with tvdb_id = None it means we couldn't figure it out at all, just use 0 for that if tvdb_id == None: tvdb_id = 0 # if we found the show then retrieve the show object if tvdb_id: showObj = helpers.findCertainShow(sickbeard.showList, tvdb_id) if showObj: tvrage_id = showObj.tvrid tvdb_lang = showObj.lang # if we weren't provided with season/episode information then get it from the name that we parsed if not season: season = parse_result.season_number if parse_result.season_number != None else 1 if not episodes: episodes = parse_result.episode_numbers # if we have an air-by-date show then get the real season/episode numbers if parse_result.air_by_date and tvdb_id: try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if not (tvdb_lang == "" or tvdb_lang == "en" or tvdb_lang == None): ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(**ltvdb_api_parms) epObj = t[tvdb_id].airedOn(parse_result.air_date)[0] season = int(epObj["seasonnumber"]) episodes = [int(epObj["episodenumber"])] except tvdb_exceptions.tvdb_episodenotfound: logger.log(u"Unable to find episode with date " + str(parse_result.air_date) + " for show " + parse_result.series_name + ", skipping", logger.WARNING) return False except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to contact TVDB: " + ex(e), logger.WARNING) return False episodeText = "|" + "|".join(map(str, episodes)) + "|" # get the current timestamp curTimestamp = int(time.mktime(datetime.datetime.today().timetuple())) if not quality: quality = Quality.nameQuality(name) myDB.action("INSERT INTO [" + self.providerID + "] (name, season, episodes, tvrid, tvdbid, url, time, quality) VALUES (?,?,?,?,?,?,?,?)", [name, season, episodeText, 
tvrage_id, tvdb_id, url, curTimestamp, quality]) def searchCache(self, episode, manualSearch=False): neededEps = self.findNeededEpisodes(episode, manualSearch) return neededEps[episode] def listPropers(self, date=None, delimiter="."): myDB = self._getDB() sql = "SELECT * FROM [" + self.providerID + "] WHERE name LIKE '%.PROPER.%' OR name LIKE '%.REPACK.%'" if date != None: sql += " AND time >= " + str(int(time.mktime(date.timetuple()))) #return filter(lambda x: x['tvdbid'] != 0, myDB.select(sql)) return myDB.select(sql) def findNeededEpisodes(self, episode=None, manualSearch=False): neededEps = {} if episode: neededEps[episode] = [] myDB = self._getDB() if not episode: sqlResults = myDB.select("SELECT * FROM [" + self.providerID + "]") else: sqlResults = myDB.select("SELECT * FROM [" + self.providerID + "] WHERE tvdbid = ? AND season = ? AND episodes LIKE ?", [episode.show.tvdbid, episode.season, "%|" + str(episode.episode) + "|%"]) # for each cache entry for curResult in sqlResults: # skip non-tv crap if not show_name_helpers.filterBadReleases(curResult["name"]): continue # get the show object, or if it's not one of our shows then ignore it showObj = helpers.findCertainShow(sickbeard.showList, int(curResult["tvdbid"])) if not showObj: continue # get season and ep data (ignoring multi-eps for now) curSeason = int(curResult["season"]) if curSeason == -1: continue curEp = curResult["episodes"].split("|")[1] if not curEp: continue curEp = int(curEp) curQuality = int(curResult["quality"]) # if the show says we want that episode then add it to the list if not showObj.wantEpisode(curSeason, curEp, curQuality, manualSearch): logger.log(u"Skipping " + curResult["name"] + " because we don't want an episode that's " + Quality.qualityStrings[curQuality], logger.DEBUG) else: if episode: epObj = episode else: epObj = showObj.getEpisode(curSeason, curEp) # build a result object title = curResult["name"] url = curResult["url"] logger.log(u"Found result " + title + " at " + url) 
result = self.provider.getResult([epObj]) result.url = url result.name = title result.quality = curQuality # add it to the list if epObj not in neededEps: neededEps[epObj] = [result] else: neededEps[epObj].append(result) return neededEps
16,183
Python
.py
304
39.180921
207
0.578605
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,301
encodingKludge.py
midgetspy_Sick-Beard/sickbeard/encodingKludge.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import os

from sickbeard import logger

import sickbeard

# This module tries to deal with the apparently random behavior of python when dealing with unicode <-> utf-8
# encodings. It tries to just use unicode, but if that fails then it tries forcing it to utf-8. Any functions
# which return something should always return unicode.


def fixStupidEncodings(x, silent=False):
    """Coerce x to unicode using the configured system encoding.

    x: a str or unicode value (anything else is logged and dropped)
    silent: if True, log the unknown-type case at DEBUG instead of ERROR

    Returns a unicode object, or None when x is an undecodable str or an
    unsupported type.
    """
    if type(x) == str:
        try:
            return x.decode(sickbeard.SYS_ENCODING)
        except UnicodeDecodeError:
            logger.log(u"Unable to decode value: " + repr(x), logger.ERROR)
            return None
    elif type(x) == unicode:
        return x
    else:
        logger.log(u"Unknown value passed in, ignoring it: " + str(type(x)) + " (" + repr(x) + ":" + repr(type(x)) + ")",
                   logger.DEBUG if silent else logger.ERROR)
        return None
    # NOTE: the original had an unreachable trailing "return None" after this
    # if/elif/else (every branch already returns); it has been removed.


def fixListEncodings(x):
    """Run fixStupidEncodings over every element of a list or tuple.

    Elements that fail to decode (i.e. come back None) are dropped.
    Non-list/tuple input is returned untouched.
    """
    if type(x) != list and type(x) != tuple:
        return x
    else:
        return filter(lambda item: item != None, map(fixStupidEncodings, x))


def callPeopleStupid(x):
    """Encode x with the system encoding, falling back to lossy 'ignore' mode.

    The fallback silently drops unencodable characters, which corrupts the
    value — hence the loud log message.
    """
    try:
        return x.encode(sickbeard.SYS_ENCODING)
    except UnicodeEncodeError:
        logger.log(u"YOUR COMPUTER SUCKS! Your data is being corrupted by a bad locale/encoding setting. Report this error on the forums or IRC please: " + repr(x) + ", " + sickbeard.SYS_ENCODING, logger.ERROR)
        return x.encode(sickbeard.SYS_ENCODING, 'ignore')


def ek(func, *args):
    """Encoding kludge: call func with OS-appropriate argument encodings.

    On Windows (os.name == 'nt') arguments are passed through untouched;
    elsewhere, str/unicode arguments are forced to the system encoding
    first. The result is normalized back toward unicode: lists/tuples are
    filtered element-wise and plain str results are decoded.
    """
    if os.name == 'nt':
        result = func(*args)
    else:
        result = func(*[callPeopleStupid(x) if type(x) in (str, unicode) else x for x in args])

    if type(result) in (list, tuple):
        return fixListEncodings(result)
    elif type(result) == str:
        return fixStupidEncodings(result)
    else:
        return result
2,589
Python
.py
59
37.949153
205
0.686454
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,302
ui.py
midgetspy_Sick-Beard/sickbeard/ui.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import datetime

import cherrypy

import sickbeard

# Notification categories; rendered differently by the web UI.
MESSAGE = 'notice'
ERROR = 'error'


class Notifications(object):
    """
    A queue of Notification objects.
    """
    def __init__(self):
        self._messages = []
        self._errors = []

    def message(self, title, message=''):
        """
        Add a regular notification to the queue

        title: The title of the notification
        message: The message portion of the notification
        """
        self._messages.append(Notification(title, message, MESSAGE))

    def error(self, title, message=''):
        """
        Add an error notification to the queue

        title: The title of the notification
        message: The message portion of the notification
        """
        self._errors.append(Notification(title, message, ERROR))

    def get_notifications(self):
        """
        Return all the available notifications in a list. Marks them all as seen
        as it returns them. Also removes timed out Notifications from the queue.

        Returns: A list of Notification objects
        """
        # filter out expired notifications
        self._errors = [x for x in self._errors if not x.is_expired()]
        self._messages = [x for x in self._messages if not x.is_expired()]

        # return any notifications that haven't been shown to the client already
        return [x.see() for x in self._errors + self._messages if x.is_new()]


# static notification queue object
notifications = Notifications()


class Notification(object):
    """
    Represents a single notification. Tracks its own timeout and a list of which
    clients have seen it before.
    """
    # NOTE: the `type` parameter shadows the builtin but is kept for caller
    # compatibility.
    def __init__(self, title, message='', type=None, timeout=None):
        self.title = title
        self.message = message

        self._when = datetime.datetime.now()
        self._seen = []

        if type:
            self.type = type
        else:
            self.type = MESSAGE

        if timeout:
            self._timeout = timeout
        else:
            self._timeout = datetime.timedelta(minutes=1)

    def is_new(self):
        """
        Returns True if the notification hasn't been displayed to the current client (aka IP address).
        """
        return cherrypy.request.remote.ip not in self._seen

    def is_expired(self):
        """
        Returns True if the notification is older than the specified timeout value.
        """
        return datetime.datetime.now() - self._when > self._timeout

    def see(self):
        """
        Returns this notification object and marks it as seen by the client ip
        """
        self._seen.append(cherrypy.request.remote.ip)
        return self


class ProgressIndicator():
    """Plain value holder pairing a completion percentage with a status dict."""

    def __init__(self, percentComplete=0, currentStatus=None):
        # BUGFIX: the original default was the mutable literal {'title': ''},
        # which was shared across every instance created without an explicit
        # status — mutating one indicator's status mutated them all. Build a
        # fresh dict per instance instead.
        self.percentComplete = percentComplete
        self.currentStatus = currentStatus if currentStatus is not None else {'title': ''}


class ProgressIndicators():
    """Registry of named lists of progress indicators shown by the UI."""

    _pi = {'massUpdate': [],
           'massAdd': [],
           'dailyUpdate': []
           }

    @staticmethod
    def getIndicator(name):
        """Return the (pruned) list of indicators registered under name."""
        if name not in ProgressIndicators._pi:
            return []

        # if any of the progress indicators are done take them off the list.
        # BUGFIX: iterate over a snapshot — the original removed elements from
        # the very list it was iterating, which skips the element following
        # each removal.
        for curPI in list(ProgressIndicators._pi[name]):
            if curPI != None and curPI.percentComplete() == 100:
                ProgressIndicators._pi[name].remove(curPI)

        # return the list of progress indicators associated with this name
        return ProgressIndicators._pi[name]

    @staticmethod
    def setIndicator(name, indicator):
        ProgressIndicators._pi[name].append(indicator)


class QueueProgressIndicator():
    """
    A class used by the UI to show the progress of the queue or a part of it.
    """
    def __init__(self, name, queueItemList):
        self.queueItemList = queueItemList
        self.name = name

    def numTotal(self):
        return len(self.queueItemList)

    def numFinished(self):
        # finished == no longer sitting in the queue
        return len([x for x in self.queueItemList if not x.isInQueue()])

    def numRemaining(self):
        return len([x for x in self.queueItemList if x.isInQueue()])

    def nextName(self):
        # the currently-running item comes first, then the pending queue
        for curItem in [sickbeard.showQueueScheduler.action.currentItem] + sickbeard.showQueueScheduler.action.queue:  # @UndefinedVariable
            if curItem in self.queueItemList:
                return curItem.name

        return "Unknown"

    def percentComplete(self):
        numFinished = self.numFinished()
        numTotal = self.numTotal()

        # guard the empty-queue case to avoid ZeroDivisionError
        if numTotal == 0:
            return 0
        else:
            return int(float(numFinished) / float(numTotal) * 100)


class LoadingTVShow():
    """Placeholder for a show that is still being loaded from disk."""
    # NOTE: `dir` shadows the builtin but is kept for caller compatibility.
    def __init__(self, dir):
        self.dir = dir
        self.show = None
5,717
Python
.py
141
31.624113
136
0.639227
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,303
versionChecker.py
midgetspy_Sick-Beard/sickbeard/versionChecker.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import sickbeard from sickbeard import helpers from sickbeard import version, ui from sickbeard import logger from sickbeard import scene_exceptions from sickbeard.exceptions import ex from sickbeard import encodingKludge as ek import os import platform import shutil import subprocess import re import urllib import zipfile import tarfile import gh_api as github class CheckVersion(): """ Version check class meant to run as a thread object with the SB scheduler. """ def __init__(self): self.install_type = self.find_install_type() if self.install_type == 'win': self.updater = WindowsUpdateManager() elif self.install_type == 'git': self.updater = GitUpdateManager() elif self.install_type == 'source': self.updater = SourceUpdateManager() else: self.updater = None def run(self): self.check_for_new_version() # refresh scene exceptions too scene_exceptions.retrieve_exceptions() def find_install_type(self): """ Determines how this copy of SB was installed. returns: type of installation. 
Possible values are: 'win': any compiled windows build 'git': running from source using git 'source': running from source without git """ # check if we're a windows build if sickbeard.version.SICKBEARD_VERSION.startswith('build '): install_type = 'win' elif os.path.isdir(ek.ek(os.path.join, sickbeard.PROG_DIR, u'.git')): install_type = 'git' else: install_type = 'source' return install_type def check_for_new_version(self, force=False): """ Checks the internet for a newer version. returns: bool, True for new version or False for no new version. force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced """ if not sickbeard.VERSION_NOTIFY and not force: logger.log(u"Version checking is disabled, not checking for the newest version") return False logger.log(u"Checking if " + self.install_type + " needs an update") if not self.updater.need_update(): sickbeard.NEWEST_VERSION_STRING = None logger.log(u"No update needed") if force: ui.notifications.message('No update needed') return False self.updater.set_newest_text() return True def update(self): if self.updater.need_update(): return self.updater.update() class UpdateManager(): def get_github_repo_user(self): return 'midgetspy' def get_github_repo(self): return 'Sick-Beard' def get_update_url(self): return sickbeard.WEB_ROOT + "/home/update/?pid=" + str(sickbeard.PID) class WindowsUpdateManager(UpdateManager): def __init__(self): self.github_repo_user = self.get_github_repo_user() self.github_repo = self.get_github_repo() self.branch = 'windows_binaries' self._cur_version = None self._cur_commit_hash = None self._newest_version = None self.releases_url = "https://github.com/" + self.github_repo_user + "/" + self.github_repo + "/" + "releases" + "/" self.version_url = "https://raw.github.com/" + self.github_repo_user + "/" + self.github_repo + "/" + self.branch + "/updates.txt" def _find_installed_version(self): try: version = sickbeard.version.SICKBEARD_VERSION return int(version[6:]) except 
ValueError: logger.log(u"Unknown SickBeard Windows binary release: " + version, logger.ERROR) return None def _find_newest_version(self, whole_link=False): """ Checks git for the newest Windows binary build. Returns either the build number or the entire build URL depending on whole_link's value. whole_link: If True, returns the entire URL to the release. If False, it returns only the build number. default: False """ regex = ".*SickBeard\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip" version_url_data = helpers.getURL(self.version_url) if version_url_data is None: return None else: for curLine in version_url_data.splitlines(): logger.log(u"checking line " + curLine, logger.DEBUG) match = re.match(regex, curLine) if match: logger.log(u"found a match", logger.DEBUG) if whole_link: return curLine.strip() else: return int(match.group(1)) return None def need_update(self): self._cur_version = self._find_installed_version() self._newest_version = self._find_newest_version() logger.log(u"newest version: " + repr(self._newest_version), logger.DEBUG) if self._newest_version and self._newest_version > self._cur_version: return True return False def set_newest_text(self): sickbeard.NEWEST_VERSION_STRING = None if not self._cur_version: newest_text = "Unknown SickBeard Windows binary version. Not updating with original version." 
else: newest_text = 'There is a <a href="' + self.releases_url + '" onclick="window.open(this.href); return false;">newer version available</a> (build ' + str(self._newest_version) + ')' newest_text += "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>" sickbeard.NEWEST_VERSION_STRING = newest_text def update(self): zip_download_url = self._find_newest_version(True) logger.log(u"new_link: " + repr(zip_download_url), logger.DEBUG) if not zip_download_url: logger.log(u"Unable to find a new version link, not updating") return False try: # prepare the update dir sb_update_dir = ek.ek(os.path.join, sickbeard.PROG_DIR, u'sb-update') if os.path.isdir(sb_update_dir): logger.log(u"Clearing out update folder " + sb_update_dir + " before extracting") shutil.rmtree(sb_update_dir) logger.log(u"Creating update folder " + sb_update_dir + " before extracting") os.makedirs(sb_update_dir) # retrieve file logger.log(u"Downloading update from " + zip_download_url) zip_download_path = os.path.join(sb_update_dir, u'sb-update.zip') urllib.urlretrieve(zip_download_url, zip_download_path) if not ek.ek(os.path.isfile, zip_download_path): logger.log(u"Unable to retrieve new version from " + zip_download_url + ", can't update", logger.ERROR) return False if not ek.ek(zipfile.is_zipfile, zip_download_path): logger.log(u"Retrieved version from " + zip_download_url + " is corrupt, can't update", logger.ERROR) return False # extract to sb-update dir logger.log(u"Unzipping from " + str(zip_download_path) + " to " + sb_update_dir) update_zip = zipfile.ZipFile(zip_download_path, 'r') update_zip.extractall(sb_update_dir) update_zip.close() # delete the zip logger.log(u"Deleting zip file from " + str(zip_download_path)) os.remove(zip_download_path) # find update dir name update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))] if len(update_dir_contents) != 1: logger.log(u"Invalid update data, update failed. 
Maybe try deleting your sb-update folder?", logger.ERROR) return False content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) old_update_path = os.path.join(content_dir, u'updater.exe') new_update_path = os.path.join(sickbeard.PROG_DIR, u'updater.exe') logger.log(u"Copying new update.exe file from " + old_update_path + " to " + new_update_path) shutil.move(old_update_path, new_update_path) except Exception, e: logger.log(u"Error while trying to update: " + ex(e), logger.ERROR) return False return True class GitUpdateManager(UpdateManager): def __init__(self): self._git_path = self._find_working_git() self.github_repo_user = self.get_github_repo_user() self.github_repo = self.get_github_repo() self.branch = self._find_git_branch() self._cur_commit_hash = None self._newest_commit_hash = None self._num_commits_behind = 0 self._num_commits_ahead = 0 def _git_error(self): error_message = 'Unable to find your git executable - Shutdown SickBeard and EITHER <a href="http://code.google.com/p/sickbeard/wiki/AdvancedSettings" onclick="window.open(this.href); return false;">set git_path in your config.ini</a> OR delete your .git folder and run from source to enable updates.' 
sickbeard.NEWEST_VERSION_STRING = error_message def _find_working_git(self): test_cmd = 'version' if sickbeard.GIT_PATH: main_git = '"' + sickbeard.GIT_PATH + '"' else: main_git = 'git' logger.log(u"Checking if we can use git commands: " + main_git + ' ' + test_cmd, logger.DEBUG) output, err, exit_status = self._run_git(main_git, test_cmd) # @UnusedVariable if exit_status == 0: logger.log(u"Using: " + main_git, logger.DEBUG) return main_git else: logger.log(u"Not using: " + main_git, logger.DEBUG) # trying alternatives alternative_git = [] # osx people who start SB from launchd have a broken path, so try a hail-mary attempt for them if platform.system().lower() == 'darwin': alternative_git.append('/usr/local/git/bin/git') if platform.system().lower() == 'windows': if main_git != main_git.lower(): alternative_git.append(main_git.lower()) if alternative_git: logger.log(u"Trying known alternative git locations", logger.DEBUG) for cur_git in alternative_git: logger.log(u"Checking if we can use git commands: " + cur_git + ' ' + test_cmd, logger.DEBUG) output, err, exit_status = self._run_git(cur_git, test_cmd) # @UnusedVariable if exit_status == 0: logger.log(u"Using: " + cur_git, logger.DEBUG) return cur_git else: logger.log(u"Not using: " + cur_git, logger.DEBUG) # Still haven't found a working git error_message = 'Unable to find your git executable - Shutdown SickBeard and EITHER <a href="http://code.google.com/p/sickbeard/wiki/AdvancedSettings" onclick="window.open(this.href); return false;">set git_path in your config.ini</a> OR delete your .git folder and run from source to enable updates.' 
sickbeard.NEWEST_VERSION_STRING = error_message return None def _run_git(self, git_path, args): output = err = exit_status = None if not git_path: logger.log(u"No git specified, can't use git commands", logger.ERROR) exit_status = 1 return (output, err, exit_status) cmd = git_path + ' ' + args try: logger.log(u"Executing " + cmd + " with your shell in " + sickbeard.PROG_DIR, logger.DEBUG) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=sickbeard.PROG_DIR) output, err = p.communicate() exit_status = p.returncode if output: output = output.strip() logger.log(u"git output: " + output, logger.DEBUG) except OSError: logger.log(u"Command " + cmd + " didn't work") exit_status = 1 if exit_status == 0: logger.log(cmd + u" : returned successful", logger.DEBUG) exit_status = 0 elif exit_status == 1: logger.log(cmd + u" returned : " + output, logger.ERROR) exit_status = 1 elif exit_status == 128 or 'fatal:' in output or err: logger.log(cmd + u" returned : " + output, logger.ERROR) exit_status = 128 else: logger.log(cmd + u" returned : " + output + u", treat as error for now", logger.ERROR) exit_status = 1 return (output, err, exit_status) def _find_installed_version(self): """ Attempts to find the currently installed version of Sick Beard. Uses git show to get commit version. 
Returns: True for success or False for failure """ output, err, exit_status = self._run_git(self._git_path, 'rev-parse HEAD') # @UnusedVariable if exit_status == 0 and output: cur_commit_hash = output.strip() if not re.match('^[a-z0-9]+$', cur_commit_hash): logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR) return False self._cur_commit_hash = cur_commit_hash return True else: return False def _find_git_branch(self): branch_info, err, exit_status = self._run_git(self._git_path, 'symbolic-ref -q HEAD') # @UnusedVariable if exit_status == 0 and branch_info: branch = branch_info.strip().replace('refs/heads/', '', 1) if branch: sickbeard.version.SICKBEARD_VERSION = branch return sickbeard.version.SICKBEARD_VERSION def _check_github_for_update(self): """ Uses git commands to check if there is a newer version that the provided commit hash. If there is a newer version it sets _num_commits_behind. """ self._newest_commit_hash = None self._num_commits_behind = 0 self._num_commits_ahead = 0 # get all new info from github output, err, exit_status = self._run_git(self._git_path, 'fetch origin') # @UnusedVariable if not exit_status == 0: logger.log(u"Unable to contact github, can't check for update", logger.ERROR) return # get latest commit_hash from remote output, err, exit_status = self._run_git(self._git_path, 'rev-parse --verify --quiet "@{upstream}"') # @UnusedVariable if exit_status == 0 and output: cur_commit_hash = output.strip() if not re.match('^[a-z0-9]+$', cur_commit_hash): logger.log(u"Output doesn't look like a hash, not using it", logger.DEBUG) return else: self._newest_commit_hash = cur_commit_hash else: logger.log(u"git didn't return newest commit hash", logger.DEBUG) return # get number of commits behind and ahead (option --count not supported git < 1.7.2) output, err, exit_status = self._run_git(self._git_path, 'rev-list --left-right "@{upstream}"...HEAD') # @UnusedVariable if exit_status == 0 and output: try: self._num_commits_behind 
= int(output.count("<")) self._num_commits_ahead = int(output.count(">")) except: logger.log(u"git didn't return numbers for behind and ahead, not using it", logger.DEBUG) return logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash) + u", num_commits_behind = " + str(self._num_commits_behind) + u", num_commits_ahead = " + str(self._num_commits_ahead), logger.DEBUG) def set_newest_text(self): # if we're up to date then don't set this sickbeard.NEWEST_VERSION_STRING = None if self._num_commits_ahead: logger.log(u"Local branch is ahead of " + self.branch + ". Automatic update not possible.", logger.ERROR) newest_text = "Local branch is ahead of " + self.branch + ". Automatic update not possible." elif self._num_commits_behind > 0: base_url = 'http://github.com/' + self.github_repo_user + '/' + self.github_repo if self._newest_commit_hash: url = base_url + '/compare/' + self._cur_commit_hash + '...' + self._newest_commit_hash else: url = base_url + '/commits/' newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a> ' newest_text += " (you're " + str(self._num_commits_behind) + " commit" if self._num_commits_behind > 1: newest_text += 's' newest_text += ' behind)' + "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>" else: return sickbeard.NEWEST_VERSION_STRING = newest_text def need_update(self): self._find_installed_version() if not self._cur_commit_hash: return True else: try: self._check_github_for_update() except Exception, e: logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) return False if self._num_commits_behind > 0: return True return False def update(self): """ Calls git pull origin <branch> in order to update Sick Beard. Returns a bool depending on the call's success. 
""" output, err, exit_status = self._run_git(self._git_path, 'pull origin ' + self.branch) # @UnusedVariable if exit_status == 0: return True else: return False return False class SourceUpdateManager(UpdateManager): def __init__(self): self.github_repo_user = self.get_github_repo_user() self.github_repo = self.get_github_repo() self.branch = sickbeard.version.SICKBEARD_VERSION self._cur_commit_hash = None self._newest_commit_hash = None self._num_commits_behind = 0 def _find_installed_version(self): version_file = ek.ek(os.path.join, sickbeard.PROG_DIR, u'version.txt') if not os.path.isfile(version_file): self._cur_commit_hash = None return try: with open(version_file, 'r') as fp: self._cur_commit_hash = fp.read().strip(' \n\r') except EnvironmentError, e: logger.log(u"Unable to open 'version.txt': " + ex(e), logger.DEBUG) if not self._cur_commit_hash: self._cur_commit_hash = None def need_update(self): self._find_installed_version() try: self._check_github_for_update() except Exception, e: logger.log(u"Unable to contact github, can't check for update: " + repr(e), logger.ERROR) return False if not self._cur_commit_hash or self._num_commits_behind > 0: return True return False def _check_github_for_update(self): """ Uses pygithub to ask github if there is a newer version that the provided commit hash. If there is a newer version it sets Sick Beard's version text. 
commit_hash: hash that we're checking against """ self._num_commits_behind = 0 self._newest_commit_hash = None gh = github.GitHub(self.github_repo_user, self.github_repo, self.branch) # try to get newest commit hash and commits behind directly by comparing branch and current commit if self._cur_commit_hash: branch_compared = gh.compare(base=self.branch, head=self._cur_commit_hash) if 'base_commit' in branch_compared: self._newest_commit_hash = branch_compared['base_commit']['sha'] if 'behind_by' in branch_compared: self._num_commits_behind = int(branch_compared['behind_by']) # fall back and iterate over last 100 (items per page in gh_api) commits if not self._newest_commit_hash: for curCommit in gh.commits(): if not self._newest_commit_hash: self._newest_commit_hash = curCommit['sha'] if not self._cur_commit_hash: break if curCommit['sha'] == self._cur_commit_hash: break # when _cur_commit_hash doesn't match anything _num_commits_behind == 100 self._num_commits_behind += 1 logger.log(u"cur_commit = " + str(self._cur_commit_hash) + u", newest_commit = " + str(self._newest_commit_hash) + u", num_commits_behind = " + str(self._num_commits_behind), logger.DEBUG) def set_newest_text(self): # if we're up to date then don't set this sickbeard.NEWEST_VERSION_STRING = None if not self._cur_commit_hash: logger.log(u"Unknown current version number, don't know if we should update or not", logger.DEBUG) newest_text = "Unknown current version number: If you've never used the Sick Beard upgrade system before then current version is not set." newest_text += "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>" elif self._num_commits_behind > 0: base_url = 'http://github.com/' + self.github_repo_user + '/' + self.github_repo if self._newest_commit_hash: url = base_url + '/compare/' + self._cur_commit_hash + '...' 
+ self._newest_commit_hash else: url = base_url + '/commits/' newest_text = 'There is a <a href="' + url + '" onclick="window.open(this.href); return false;">newer version available</a>' newest_text += " (you're " + str(self._num_commits_behind) + " commit" if self._num_commits_behind > 1: newest_text += "s" newest_text += " behind)" + "&mdash; <a href=\"" + self.get_update_url() + "\">Update Now</a>" else: return sickbeard.NEWEST_VERSION_STRING = newest_text def update(self): """ Downloads the latest source tarball from github and installs it over the existing version. """ base_url = 'https://github.com/' + self.github_repo_user + '/' + self.github_repo tar_download_url = base_url + '/tarball/' + self.branch version_path = ek.ek(os.path.join, sickbeard.PROG_DIR, u'version.txt') try: # prepare the update dir sb_update_dir = ek.ek(os.path.join, sickbeard.PROG_DIR, u'sb-update') if os.path.isdir(sb_update_dir): logger.log(u"Clearing out update folder " + sb_update_dir + " before extracting") shutil.rmtree(sb_update_dir) logger.log(u"Creating update folder " + sb_update_dir + " before extracting") os.makedirs(sb_update_dir) # retrieve file logger.log(u"Downloading update from " + repr(tar_download_url)) tar_download_path = os.path.join(sb_update_dir, u'sb-update.tar') urllib.urlretrieve(tar_download_url, tar_download_path) if not ek.ek(os.path.isfile, tar_download_path): logger.log(u"Unable to retrieve new version from " + tar_download_url + ", can't update", logger.ERROR) return False if not ek.ek(tarfile.is_tarfile, tar_download_path): logger.log(u"Retrieved version from " + tar_download_url + " is corrupt, can't update", logger.ERROR) return False # extract to sb-update dir logger.log(u"Extracting file " + tar_download_path) tar = tarfile.open(tar_download_path) tar.extractall(sb_update_dir) tar.close() # delete .tar.gz logger.log(u"Deleting file " + tar_download_path) os.remove(tar_download_path) # find update dir name update_dir_contents = [x for x in 
os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))] if len(update_dir_contents) != 1: logger.log(u"Invalid update data, update failed: " + str(update_dir_contents), logger.ERROR) return False content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder logger.log(u"Moving files from " + content_dir + " to " + sickbeard.PROG_DIR) for dirname, dirnames, filenames in os.walk(content_dir): # @UnusedVariable dirname = dirname[len(content_dir) + 1:] for curfile in filenames: old_path = os.path.join(content_dir, dirname, curfile) new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile) if os.path.isfile(new_path): os.remove(new_path) os.renames(old_path, new_path) # update version.txt with commit hash try: with open(version_path, 'w') as ver_file: ver_file.write(self._newest_commit_hash) except EnvironmentError, e: logger.log(u"Unable to write version file, update not complete: " + ex(e), logger.ERROR) return False except Exception, e: logger.log(u"Error while trying to update: " + ex(e), logger.ERROR) return False return True
27,509
Python
.py
518
40.453668
310
0.589563
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,304
search_queue.py
midgetspy_Sick-Beard/sickbeard/search_queue.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import datetime import time import sickbeard from sickbeard import db, logger, common, exceptions, helpers from sickbeard import generic_queue from sickbeard import search from sickbeard import ui BACKLOG_SEARCH = 10 RSS_SEARCH = 20 MANUAL_SEARCH = 30 class SearchQueue(generic_queue.GenericQueue): def __init__(self): generic_queue.GenericQueue.__init__(self) self.queue_name = "SEARCHQUEUE" def is_in_queue(self, show, segment): for cur_item in self.queue: if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment: return True return False def is_ep_in_queue(self, ep_obj): for cur_item in self.queue: if isinstance(cur_item, ManualSearchQueueItem) and cur_item.ep_obj == ep_obj: return True return False def pause_backlog(self): self.min_priority = generic_queue.QueuePriorities.HIGH def unpause_backlog(self): self.min_priority = 0 def is_backlog_paused(self): # backlog priorities are NORMAL, this should be done properly somewhere return self.min_priority >= generic_queue.QueuePriorities.NORMAL def is_backlog_in_progress(self): for cur_item in self.queue + [self.currentItem]: if isinstance(cur_item, BacklogQueueItem): return True return False def 
add_item(self, item): if isinstance(item, RSSSearchQueueItem): generic_queue.GenericQueue.add_item(self, item) # don't do duplicates elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment): generic_queue.GenericQueue.add_item(self, item) elif isinstance(item, ManualSearchQueueItem) and not self.is_ep_in_queue(item.ep_obj): generic_queue.GenericQueue.add_item(self, item) else: logger.log(u"Not adding item, it's already in the queue", logger.DEBUG) class ManualSearchQueueItem(generic_queue.QueueItem): def __init__(self, ep_obj): generic_queue.QueueItem.__init__(self, 'Manual Search', MANUAL_SEARCH) self.priority = generic_queue.QueuePriorities.HIGH self.ep_obj = ep_obj self.success = None def execute(self): generic_queue.QueueItem.execute(self) logger.log(u"Beginning manual search for " + self.ep_obj.prettyName()) foundEpisode = search.findEpisode(self.ep_obj, manualSearch=True) result = False if not foundEpisode: ui.notifications.message('No downloads were found', "Couldn't find a download for <i>%s</i>" % self.ep_obj.prettyName()) logger.log(u"Unable to find a download for " + self.ep_obj.prettyName()) else: # just use the first result for now logger.log(u"Downloading episode from " + foundEpisode.url) result = search.snatchEpisode(foundEpisode) providerModule = foundEpisode.provider if not result: ui.notifications.error('Error while attempting to snatch ' + foundEpisode.name + ', check your logs') elif providerModule is None: ui.notifications.error('Provider is configured incorrectly, unable to download') self.success = result def finish(self): # don't let this linger if something goes wrong if self.success is None: self.success = False generic_queue.QueueItem.finish(self) class RSSSearchQueueItem(generic_queue.QueueItem): def __init__(self): generic_queue.QueueItem.__init__(self, 'RSS Search', RSS_SEARCH) def execute(self): generic_queue.QueueItem.execute(self) self._changeMissingEpisodes() logger.log(u"Beginning search for new 
episodes on RSS") foundResults = search.searchForNeededEpisodes() if not len(foundResults): logger.log(u"No needed episodes found on the RSS feeds") else: for curResult in foundResults: search.snatchEpisode(curResult) time.sleep(2) generic_queue.QueueItem.finish(self) def _changeMissingEpisodes(self): logger.log(u"Changing all old missing episodes (UNAIRED) to status WANTED") curDate = datetime.date.today().toordinal() myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND airdate < ?", [common.UNAIRED, curDate]) for sqlEp in sqlResults: try: show = helpers.findCertainShow(sickbeard.showList, int(sqlEp["showid"])) except exceptions.MultipleShowObjectsException: logger.log(u"ERROR: expected to find a single show matching " + sqlEp["showid"]) return None if show is None: logger.log(u"Unable to find the show with ID " + str(sqlEp["showid"]) + " in your show list! DB value was " + str(sqlEp), logger.ERROR) return None ep = show.getEpisode(sqlEp["season"], sqlEp["episode"]) with ep.lock: if ep.show.paused: ep.status = common.SKIPPED else: ep.status = common.WANTED ep.saveToDB() class BacklogQueueItem(generic_queue.QueueItem): def __init__(self, show, segment): generic_queue.QueueItem.__init__(self, 'Backlog', BACKLOG_SEARCH) self.priority = generic_queue.QueuePriorities.LOW self.thread_name = 'BACKLOG-' + str(show.tvdbid) self.show = show self.segment = segment logger.log(u"Seeing if we need any episodes from " + self.show.name + " season " + str(self.segment)) myDB = db.DBConnection() # see if there is anything in this season worth searching for if not self.show.air_by_date: statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? 
AND season = ?", [self.show.tvdbid, self.segment]) else: segment_year, segment_month = map(int, self.segment.split('-')) min_date = datetime.date(segment_year, segment_month, 1) # it's easier to just hard code this than to worry about rolling the year over or making a month length map if segment_month == 12: max_date = datetime.date(segment_year, 12, 31) else: max_date = datetime.date(segment_year, segment_month + 1, 1) - datetime.timedelta(days=1) statusResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= ?", [self.show.tvdbid, min_date.toordinal(), max_date.toordinal()]) anyQualities, bestQualities = common.Quality.splitQuality(self.show.quality) # @UnusedVariable self.wantSeason = self._need_any_episodes(statusResults, bestQualities) def execute(self): generic_queue.QueueItem.execute(self) results = search.findSeason(self.show, self.segment) # download whatever we find for curResult in results: if curResult: search.snatchEpisode(curResult) time.sleep(5) logger.log(u"Finished searching for episodes from " + self.show.name + " season " + str(self.segment)) self.finish() def _need_any_episodes(self, statusResults, bestQualities): wantSeason = False # check through the list of statuses to see if we want any for curStatusResult in statusResults: curCompositeStatus = int(curStatusResult["status"]) curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus) if bestQualities: highestBestQuality = max(bestQualities) else: highestBestQuality = 0 # if we need a better one then say yes if (curStatus in (common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER) and curQuality < highestBestQuality) or curStatus == common.WANTED: wantSeason = True break return wantSeason
9,268
Python
.py
178
41.011236
158
0.64265
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,305
config.py
midgetspy_Sick-Beard/sickbeard/config.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import cherrypy import os.path import datetime import re import urlparse from sickbeard import encodingKludge as ek from sickbeard import helpers from sickbeard import logger from sickbeard import naming from sickbeard import db import sickbeard # Address poor support for scgi over unix domain sockets # this is not nicely handled by python currently # http://bugs.python.org/issue23636 urlparse.uses_netloc.append('scgi') naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d", "s%(seasonnumber)02de%(episodenumber)02d", "S%(seasonnumber)02dE%(episodenumber)02d", "%(seasonnumber)02dx%(episodenumber)02d") naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02") naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(naming_ep_type), 1: [" - " + x for x in naming_ep_type], 2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]} naming_multi_ep_type_text = ("extend", "duplicate", "repeat") naming_sep_type = (" - ", " ") naming_sep_type_text = (" - ", "space") def change_HTTPS_CERT(https_cert): if https_cert == '': sickbeard.HTTPS_CERT = '' return True if os.path.normpath(sickbeard.HTTPS_CERT) != os.path.normpath(https_cert): if helpers.makeDir(os.path.dirname(os.path.abspath(https_cert))): sickbeard.HTTPS_CERT = 
os.path.normpath(https_cert) logger.log(u"Changed https cert path to " + https_cert) else: return False return True def change_HTTPS_KEY(https_key): if https_key == '': sickbeard.HTTPS_KEY = '' return True if os.path.normpath(sickbeard.HTTPS_KEY) != os.path.normpath(https_key): if helpers.makeDir(os.path.dirname(os.path.abspath(https_key))): sickbeard.HTTPS_KEY = os.path.normpath(https_key) logger.log(u"Changed https key path to " + https_key) else: return False return True def change_LOG_DIR(log_dir, web_log): log_dir_changed = False abs_log_dir = os.path.normpath(os.path.join(sickbeard.DATA_DIR, log_dir)) web_log_value = checkbox_to_value(web_log) if os.path.normpath(sickbeard.LOG_DIR) != abs_log_dir: if helpers.makeDir(abs_log_dir): sickbeard.ACTUAL_LOG_DIR = os.path.normpath(log_dir) sickbeard.LOG_DIR = abs_log_dir logger.sb_log_instance.initLogging() logger.log(u"Initialized new log file in " + sickbeard.LOG_DIR) log_dir_changed = True else: return False if sickbeard.WEB_LOG != web_log_value or log_dir_changed == True: sickbeard.WEB_LOG = web_log_value if sickbeard.WEB_LOG: cherry_log = os.path.join(sickbeard.LOG_DIR, "cherrypy.log") logger.log(u"Change cherry log file to " + cherry_log) else: cherry_log = None logger.log(u"Disable cherry logging") cherrypy.config.update({'log.access_file': cherry_log}) return True def change_NZB_DIR(nzb_dir): if nzb_dir == '': sickbeard.NZB_DIR = '' return True if os.path.normpath(sickbeard.NZB_DIR) != os.path.normpath(nzb_dir): if helpers.makeDir(nzb_dir): sickbeard.NZB_DIR = os.path.normpath(nzb_dir) logger.log(u"Changed NZB folder to " + nzb_dir) else: return False return True def change_TORRENT_DIR(torrent_dir): if torrent_dir == '': sickbeard.TORRENT_DIR = '' return True if os.path.normpath(sickbeard.TORRENT_DIR) != os.path.normpath(torrent_dir): if helpers.makeDir(torrent_dir): sickbeard.TORRENT_DIR = os.path.normpath(torrent_dir) logger.log(u"Changed torrent folder to " + torrent_dir) else: return False return True def 
change_TV_DOWNLOAD_DIR(tv_download_dir): if tv_download_dir == '': sickbeard.TV_DOWNLOAD_DIR = '' return True if os.path.normpath(sickbeard.TV_DOWNLOAD_DIR) != os.path.normpath(tv_download_dir): if helpers.makeDir(tv_download_dir): sickbeard.TV_DOWNLOAD_DIR = os.path.normpath(tv_download_dir) logger.log(u"Changed TV download folder to " + tv_download_dir) else: return False return True def change_SEARCH_FREQUENCY(freq): sickbeard.SEARCH_FREQUENCY = to_int(freq, default=sickbeard.DEFAULT_SEARCH_FREQUENCY) if sickbeard.SEARCH_FREQUENCY < sickbeard.MIN_SEARCH_FREQUENCY: sickbeard.SEARCH_FREQUENCY = sickbeard.MIN_SEARCH_FREQUENCY sickbeard.currentSearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.SEARCH_FREQUENCY) sickbeard.backlogSearchScheduler.cycleTime = datetime.timedelta(minutes=sickbeard.get_backlog_cycle_time()) def change_VERSION_NOTIFY(version_notify): oldSetting = sickbeard.VERSION_NOTIFY sickbeard.VERSION_NOTIFY = version_notify if version_notify == False: sickbeard.NEWEST_VERSION_STRING = None if oldSetting == False and version_notify == True: sickbeard.versionCheckScheduler.action.run() # @UndefinedVariable def CheckSection(CFG, sec): """ Check if INI section exists, if not create it """ try: CFG[sec] return True except: CFG[sec] = {} return False def checkbox_to_value(option, value_on=1, value_off=0): """ Turns checkbox option 'on' or 'true' to value_on (1) any other value returns value_off (0) """ if option == 'on' or option == 'true': return value_on return value_off def clean_host(host, default_port=None): """ Returns host or host:port or empty string from a given url or host If no port is found and default_port is given use host:default_port """ host = host.strip() if host: match_host_port = re.search(r'(?:http.*://)?(?P<host>[^:/]+).?(?P<port>[0-9]*).*', host) cleaned_host = match_host_port.group('host') cleaned_port = match_host_port.group('port') if cleaned_host: if cleaned_port: host = cleaned_host + ':' + cleaned_port elif 
default_port: host = cleaned_host + ':' + str(default_port) else: host = cleaned_host else: host = '' return host def clean_hosts(hosts, default_port=None): cleaned_hosts = [] for cur_host in [x.strip() for x in hosts.split(",")]: if cur_host: cleaned_host = clean_host(cur_host, default_port) if cleaned_host: cleaned_hosts.append(cleaned_host) if cleaned_hosts: cleaned_hosts = ",".join(cleaned_hosts) else: cleaned_hosts = '' return cleaned_hosts def clean_url(url): """ Returns an cleaned url starting with a scheme and folder with trailing / or an empty string """ if url and url.strip(): url = url.strip() if '://' not in url: url = '//' + url scheme, netloc, path, query, fragment = urlparse.urlsplit(url, 'http') if not path: path = path + '/' cleaned_url = urlparse.urlunsplit((scheme, netloc, path, query, fragment)) else: cleaned_url = '' return cleaned_url def to_int(val, default=0): """ Return int value of val or default on error """ try: val = int(val) except: val = default return val ################################################################################ # Check_setting_int # ################################################################################ def minimax(val, default, low, high): """ Return value forced within range """ val = to_int(val, default=default) if val < low: return low if val > high: return high return val ################################################################################ # Check_setting_int # ################################################################################ def check_setting_int(config, cfg_name, item_name, def_val): try: my_val = int(config[cfg_name][item_name]) except: my_val = def_val try: config[cfg_name][item_name] = my_val except: config[cfg_name] = {} config[cfg_name][item_name] = my_val logger.log(item_name + " -> " + str(my_val), logger.DEBUG) return my_val ################################################################################ # Check_setting_float # 
################################################################################ def check_setting_float(config, cfg_name, item_name, def_val): try: my_val = float(config[cfg_name][item_name]) except: my_val = def_val try: config[cfg_name][item_name] = my_val except: config[cfg_name] = {} config[cfg_name][item_name] = my_val logger.log(item_name + " -> " + str(my_val), logger.DEBUG) return my_val ################################################################################ # Check_setting_str # ################################################################################ def check_setting_str(config, cfg_name, item_name, def_val, log=True): try: my_val = config[cfg_name][item_name] except: my_val = def_val try: config[cfg_name][item_name] = my_val except: config[cfg_name] = {} config[cfg_name][item_name] = my_val if log: logger.log(item_name + " -> " + my_val, logger.DEBUG) else: logger.log(item_name + " -> ******", logger.DEBUG) return my_val class ConfigMigrator(): def __init__(self, config_obj): """ Initializes a config migrator that can take the config from the version indicated in the config file up to the version required by SB """ self.config_obj = config_obj # check the version of the config self.config_version = check_setting_int(config_obj, 'General', 'config_version', sickbeard.CONFIG_VERSION) self.expected_config_version = sickbeard.CONFIG_VERSION self.migration_names = {1: 'Custom naming', 2: 'Sync backup number with version number', 3: 'Rename omgwtfnzb variables', 4: 'Add newznab catIDs', 5: 'Metadata update' } def migrate_config(self): """ Calls each successive migration until the config is the same version as SB expects """ if self.config_version > self.expected_config_version: logger.log_error_and_exit(u"Your config version (" + str(self.config_version) + ") has been incremented past what this version of Sick Beard supports (" + str(self.expected_config_version) + ").\n" + \ "If you have used other forks or a newer version of Sick Beard, 
your config file may be unusable due to their modifications.") sickbeard.CONFIG_VERSION = self.config_version while self.config_version < self.expected_config_version: next_version = self.config_version + 1 if next_version in self.migration_names: migration_name = ': ' + self.migration_names[next_version] else: migration_name = '' logger.log(u"Backing up config before upgrade") if not helpers.backupVersionedFile(sickbeard.CONFIG_FILE, self.config_version): logger.log_error_and_exit(u"Config backup failed, abort upgrading config") else: logger.log(u"Proceeding with upgrade") # do the migration, expect a method named _migrate_v<num> logger.log(u"Migrating config up to version " + str(next_version) + migration_name) getattr(self, '_migrate_v' + str(next_version))() self.config_version = next_version # save new config after migration sickbeard.CONFIG_VERSION = self.config_version logger.log(u"Saving config file to disk") sickbeard.save_config() # Migration v1: Custom naming def _migrate_v1(self): """ Reads in the old naming settings from your config and generates a new config template from them. 
""" sickbeard.NAMING_PATTERN = self._name_to_pattern() logger.log(u"Based on your old settings I'm setting your new naming pattern to: " + sickbeard.NAMING_PATTERN) sickbeard.NAMING_CUSTOM_ABD = bool(check_setting_int(self.config_obj, 'General', 'naming_dates', 0)) if sickbeard.NAMING_CUSTOM_ABD: sickbeard.NAMING_ABD_PATTERN = self._name_to_pattern(True) logger.log(u"Adding a custom air-by-date naming pattern to your config: " + sickbeard.NAMING_ABD_PATTERN) else: sickbeard.NAMING_ABD_PATTERN = naming.name_abd_presets[0] sickbeard.NAMING_MULTI_EP = int(check_setting_int(self.config_obj, 'General', 'naming_multi_ep_type', 1)) # see if any of their shows used season folders myDB = db.DBConnection() season_folder_shows = myDB.select("SELECT * FROM tv_shows WHERE flatten_folders = 0") # if any shows had season folders on then prepend season folder to the pattern if season_folder_shows: old_season_format = check_setting_str(self.config_obj, 'General', 'season_folders_format', 'Season %02d') if old_season_format: try: new_season_format = old_season_format % 9 new_season_format = new_season_format.replace('09', '%0S') new_season_format = new_season_format.replace('9', '%S') logger.log(u"Changed season folder format from " + old_season_format + " to " + new_season_format + ", prepending it to your naming config") sickbeard.NAMING_PATTERN = new_season_format + os.sep + sickbeard.NAMING_PATTERN except (TypeError, ValueError): logger.log(u"Can't change " + old_season_format + " to new season format", logger.ERROR) # if no shows had it on then don't flatten any shows and don't put season folders in the config else: logger.log(u"No shows were using season folders before so I'm disabling flattening on all shows") # don't flatten any shows at all myDB.action("UPDATE tv_shows SET flatten_folders = 0") sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders() def _name_to_pattern(self, abd=False): # get the old settings from the file use_periods = 
bool(check_setting_int(self.config_obj, 'General', 'naming_use_periods', 0)) ep_type = check_setting_int(self.config_obj, 'General', 'naming_ep_type', 0) sep_type = check_setting_int(self.config_obj, 'General', 'naming_sep_type', 0) use_quality = bool(check_setting_int(self.config_obj, 'General', 'naming_quality', 0)) use_show_name = bool(check_setting_int(self.config_obj, 'General', 'naming_show_name', 1)) use_ep_name = bool(check_setting_int(self.config_obj, 'General', 'naming_ep_name', 1)) # make the presets into templates naming_ep_type = ("%Sx%0E", "s%0Se%0E", "S%0SE%0E", "%0Sx%0E") naming_sep_type = (" - ", " ") # set up our data to use if use_periods: show_name = '%S.N' ep_name = '%E.N' ep_quality = '%Q.N' abd_string = '%A.D' else: show_name = '%SN' ep_name = '%EN' ep_quality = '%QN' abd_string = '%A-D' if abd: ep_string = abd_string else: ep_string = naming_ep_type[ep_type] finalName = "" # start with the show name if use_show_name: finalName += show_name + naming_sep_type[sep_type] # add the season/ep stuff finalName += ep_string # add the episode name if use_ep_name: finalName += naming_sep_type[sep_type] + ep_name # add the quality if use_quality: finalName += naming_sep_type[sep_type] + ep_quality if use_periods: finalName = re.sub("\s+", ".", finalName) return finalName # Migration v2: Dummy migration to sync backup number with config version number def _migrate_v2(self): return # Migration v2: Rename omgwtfnzb variables def _migrate_v3(self): """ Reads in the old naming settings from your config and generates a new config template from them. 
""" # get the old settings from the file and store them in the new variable names sickbeard.OMGWTFNZBS_USERNAME = check_setting_str(self.config_obj, 'omgwtfnzbs', 'omgwtfnzbs_uid', '') sickbeard.OMGWTFNZBS_APIKEY = check_setting_str(self.config_obj, 'omgwtfnzbs', 'omgwtfnzbs_key', '') # Migration v4: Add default newznab catIDs def _migrate_v4(self): """ Update newznab providers so that the category IDs can be set independently via the config """ new_newznab_data = [] old_newznab_data = check_setting_str(self.config_obj, 'Newznab', 'newznab_data', '') if old_newznab_data: old_newznab_data_list = old_newznab_data.split("!!!") for cur_provider_data in old_newznab_data_list: try: name, url, key, enabled = cur_provider_data.split("|") except ValueError: logger.log(u"Skipping Newznab provider string: '" + cur_provider_data + "', incorrect format", logger.ERROR) continue if name == 'Sick Beard Index': key = '0' if name == 'NZBs.org': catIDs = '5030,5040,5070,5090' else: catIDs = '5030,5040' cur_provider_data_list = [name, url, key, catIDs, enabled] new_newznab_data.append("|".join(cur_provider_data_list)) sickbeard.NEWZNAB_DATA = "!!!".join(new_newznab_data) # Migration v5: Metadata upgrade def _migrate_v5(self): """ Updates metadata values to the new format """ """ Quick overview of what the upgrade does: new | old | description (new) ----+-----+-------------------- 1 | 1 | show metadata 2 | 2 | episode metadata 3 | 4 | show fanart 4 | 3 | show poster 5 | - | show banner 6 | 5 | episode thumb 7 | 6 | season poster 8 | - | season banner 9 | - | season all poster 10 | - | season all banner Note that the ini places start at 1 while the list index starts at 0. old format: 0|0|0|0|0|0 -- 6 places new format: 0|0|0|0|0|0|0|0|0|0 -- 10 places Drop the use of use_banner option. Migrate the poster override to just using the banner option (applies to xbmc only). 
""" metadata_xbmc = check_setting_str(self.config_obj, 'General', 'metadata_xbmc', '0|0|0|0|0|0') metadata_xbmc_12plus = check_setting_str(self.config_obj, 'General', 'metadata_xbmc_12plus', '0|0|0|0|0|0') metadata_mediabrowser = check_setting_str(self.config_obj, 'General', 'metadata_mediabrowser', '0|0|0|0|0|0') metadata_ps3 = check_setting_str(self.config_obj, 'General', 'metadata_ps3', '0|0|0|0|0|0') metadata_wdtv = check_setting_str(self.config_obj, 'General', 'metadata_wdtv', '0|0|0|0|0|0') metadata_tivo = check_setting_str(self.config_obj, 'General', 'metadata_tivo', '0|0|0|0|0|0') metadata_mede8er = check_setting_str(self.config_obj, 'General', 'metadata_mede8er', '0|0|0|0|0|0') use_banner = bool(check_setting_int(self.config_obj, 'General', 'use_banner', 0)) def _migrate_metadata(metadata, metadata_name, use_banner): cur_metadata = metadata.split('|') # if target has the old number of values, do upgrade if len(cur_metadata) == 6: logger.log(u"Upgrading " + metadata_name + " metadata, old value: " + metadata) cur_metadata.insert(4, '0') cur_metadata.append('0') cur_metadata.append('0') cur_metadata.append('0') # swap show fanart, show poster cur_metadata[3], cur_metadata[2] = cur_metadata[2], cur_metadata[3] # if user was using use_banner to override the poster, instead enable the banner option and deactivate poster if metadata_name == 'XBMC' and use_banner: cur_metadata[4], cur_metadata[3] = cur_metadata[3], '0' # write new format metadata = '|'.join(cur_metadata) logger.log(u"Upgrading " + metadata_name + " metadata, new value: " + metadata) elif len(cur_metadata) == 10: metadata = '|'.join(cur_metadata) logger.log(u"Keeping " + metadata_name + " metadata, value: " + metadata) else: logger.log(u"Skipping " + metadata_name + " metadata: '" + metadata + "', incorrect format", logger.ERROR) metadata = '0|0|0|0|0|0|0|0|0|0' logger.log(u"Setting " + metadata_name + " metadata, new value: " + metadata) return metadata sickbeard.METADATA_XBMC = 
_migrate_metadata(metadata_xbmc, 'XBMC', use_banner) sickbeard.METADATA_XBMC_12PLUS = _migrate_metadata(metadata_xbmc_12plus, 'XBMC 12+', use_banner) sickbeard.METADATA_MEDIABROWSER = _migrate_metadata(metadata_mediabrowser, 'MediaBrowser', use_banner) sickbeard.METADATA_PS3 = _migrate_metadata(metadata_ps3, 'PS3', use_banner) sickbeard.METADATA_WDTV = _migrate_metadata(metadata_wdtv, 'WDTV', use_banner) sickbeard.METADATA_TIVO = _migrate_metadata(metadata_tivo, 'TIVO', use_banner) sickbeard.METADATA_MEDE8ER = _migrate_metadata(metadata_mede8er, 'Mede8er', use_banner) # Migration v6: Synology notifier update def _migrate_v6(self): """ Updates Synology notifier to reflect that their now is an update library option instead misusing the enable option """ # clone use_synoindex to update_library since this now has notification options sickbeard.SYNOINDEX_UPDATE_LIBRARY = bool(check_setting_int(self.config_obj, 'Synology', 'use_synoindex', 0))
24,530
Python
.py
486
39.483539
214
0.578221
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,306
postProcessor.py
midgetspy_Sick-Beard/sickbeard/postProcessor.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import re import subprocess import sickbeard from sickbeard import db from sickbeard import classes from sickbeard import common from sickbeard import exceptions from sickbeard import helpers from sickbeard import history from sickbeard import logger from sickbeard import notifiers from sickbeard import show_name_helpers from sickbeard import scene_exceptions from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex from sickbeard.name_parser.parser import NameParser, InvalidNameException from lib.tvdb_api import tvdb_api, tvdb_exceptions class PostProcessor(object): """ A class which will process a media file according to the post processing settings in the config. """ EXISTS_LARGER = 1 EXISTS_SAME = 2 EXISTS_SMALLER = 3 DOESNT_EXIST = 4 NZB_NAME = 1 FOLDER_NAME = 2 FILE_NAME = 3 def __init__(self, file_path, nzb_name=None, pp_options={}): """ Creates a new post processor with the given file path and optionally an NZB name. 
file_path: The path to the file to be processed nzb_name: The name of the NZB which resulted in this file being downloaded (optional) """ # absolute path to the folder that is being processed self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path)) # full path to file self.file_path = file_path # file name only self.file_name = ek.ek(os.path.basename, file_path) # the name of the folder only self.folder_name = ek.ek(os.path.basename, self.folder_path) # name of the NZB that resulted in this folder self.nzb_name = nzb_name self.force_replace = pp_options.get('force_replace', False) self.in_history = False self.release_group = None self.release_name = None self.is_proper = False self.log = '' def _log(self, message, level=logger.MESSAGE): """ A wrapper for the internal logger which also keeps track of messages and saves them to a string for later. message: The string to log (unicode) level: The log level to use (optional) """ logger.log(message, level) self.log += message + '\n' def _checkForExistingFile(self, existing_file): """ Checks if a file exists already and if it does whether it's bigger or smaller than the file we are post processing existing_file: The file to compare to Returns: DOESNT_EXIST if the file doesn't exist EXISTS_LARGER if the file exists and is larger than the file we are post processing EXISTS_SMALLER if the file exists and is smaller than the file we are post processing EXISTS_SAME if the file exists and is the same size as the file we are post processing """ if not existing_file: self._log(u"There is no existing file", logger.DEBUG) return PostProcessor.DOESNT_EXIST # if the new file exists, return the appropriate code depending on the size if ek.ek(os.path.isfile, existing_file): # see if it's bigger than our old file if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path): self._log(u"File " + existing_file + " is larger than " + self.file_path, logger.DEBUG) return PostProcessor.EXISTS_LARGER 
elif ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path): self._log(u"File " + existing_file + " is the same size as " + self.file_path, logger.DEBUG) return PostProcessor.EXISTS_SAME else: self._log(u"File " + existing_file + " is smaller than " + self.file_path, logger.DEBUG) return PostProcessor.EXISTS_SMALLER else: self._log(u"File " + existing_file + " doesn't exist", logger.DEBUG) return PostProcessor.DOESNT_EXIST def _delete(self, file_path, associated_files=False): """ Deletes the file and optionally all associated files. file_path: The file to delete associated_files: True to delete all files which differ only by extension, False to leave them """ if not file_path: return # figure out which files we want to delete file_list = [file_path] if associated_files: file_list = file_list + helpers.list_associated_files(file_path, base_name_only=True) if not file_list: self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG) return # delete the file and any other files which we want to delete for cur_file in file_list: if ek.ek(os.path.isfile, cur_file): self._log(u"Deleting file " + cur_file, logger.DEBUG) ek.ek(os.remove, cur_file) # do the library update for synoindex notifiers.synoindex_notifier.deleteFile(cur_file) def _combined_file_operation(self, file_path, new_path, new_base_name, associated_files=False, action=None): """ Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location, and optionally move associated files too. file_path: The full path of the media file to act on new_path: Destination path where we want to move/copy the file to new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name. 
associated_files: Boolean, whether we should copy similarly-named files too action: function that takes an old path and new path and does an operation with them (move/copy) """ if not action: self._log(u"Must provide an action for the combined file operation", logger.ERROR) return file_list = [file_path] if associated_files: file_list = file_list + helpers.list_associated_files(file_path, filter_ext=sickbeard.FILTER_ASSOCIATED_FILES) if not file_list: self._log(u"There were no files associated with " + file_path + ", not moving anything", logger.DEBUG) return # create base name with file_path (media_file without .extension) old_base_name = file_path.rpartition('.')[0] old_base_name_length = len(old_base_name) # deal with all files for cur_file_path in file_list: cur_file_name = ek.ek(os.path.basename, cur_file_path) # get the extension without . cur_extension = cur_file_path[old_base_name_length + 1:] # replace .nfo with .nfo-orig to avoid conflicts if cur_extension == 'nfo': cur_extension = 'nfo-orig' # If new base name then convert name if new_base_name: new_file_name = new_base_name + '.' + cur_extension # if we're not renaming we still want to change extensions sometimes else: new_file_name = helpers.replaceExtension(cur_file_name, cur_extension) new_file_path = ek.ek(os.path.join, new_path, new_file_name) action(cur_file_path, new_file_path) def _move(self, file_path, new_path, new_base_name, associated_files=False): """ file_path: The full path of the media file to move new_path: Destination path where we want to move the file to new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name. 
associated_files: Boolean, whether we should move similarly-named files too """ def _int_move(cur_file_path, new_file_path): self._log(u"Moving file from " + cur_file_path + " to " + new_file_path, logger.DEBUG) try: helpers.moveFile(cur_file_path, new_file_path) helpers.chmodAsParent(new_file_path) except (IOError, OSError), e: self._log(u"Unable to move file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move) def _copy(self, file_path, new_path, new_base_name, associated_files=False): """ file_path: The full path of the media file to copy new_path: Destination path where we want to copy the file to new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name. associated_files: Boolean, whether we should copy similarly-named files too """ def _int_copy(cur_file_path, new_file_path): self._log(u"Copying file from " + cur_file_path + " to " + new_file_path, logger.DEBUG) try: helpers.copyFile(cur_file_path, new_file_path) helpers.chmodAsParent(new_file_path) except (IOError, OSError), e: logger.log(u"Unable to copy file " + cur_file_path + " to " + new_file_path + ": " + ex(e), logger.ERROR) raise e self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy) def _history_lookup(self): """ Look up the NZB name in the history and see if it contains a record for self.nzb_name Returns a (tvdb_id, season, [], quality) tuple. tvdb_id, season, quality may be None and episodes may be []. """ to_return = (None, None, [], None) # if we don't have either of these then there's nothing to use to search the history for anyway if not self.nzb_name and not self.folder_name: self.in_history = False return to_return # make a list of possible names to use in the search names = [] if self.nzb_name: names.append(self.nzb_name) if '.' 
in self.nzb_name: names.append(self.nzb_name.rpartition(".")[0]) if self.folder_name: names.append(self.folder_name) myDB = db.DBConnection() # search the database for a possible match and return immediately if we find one for curName in names: # The underscore character ( _ ) represents a single character to match a pattern from a word or string search_name = re.sub("[\.\-\ ]", "_", curName) sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [search_name]) if len(sql_results) == 0: continue tvdb_id = int(sql_results[0]["showid"]) season = int(sql_results[0]["season"]) quality = int(sql_results[0]["quality"]) if quality == common.Quality.UNKNOWN: quality = None self.in_history = True to_return = (tvdb_id, season, [], quality) self._log(u"Found result in history: " + str(to_return), logger.DEBUG) return to_return self.in_history = False return to_return def _analyze_name(self, name, file_name=True): """ Takes a name and tries to figure out a show, season, and episode from it. name: A string which we want to analyze to determine show info from (unicode) Returns a (tvdb_id, season, [episodes], quality) tuple. tvdb_id, season, quality may be None and episodes may be []. if none were found. 
""" logger.log(u"Analyzing name " + repr(name)) to_return = (None, None, [], None) if not name: return to_return name = helpers.remove_non_release_groups(helpers.remove_extension(name)) # parse the name to break it into show name, season, and episode np = NameParser(False) parse_result = np.parse(name) self._log(u"Parsed " + name + " into " + str(parse_result).decode('utf-8', 'xmlcharrefreplace'), logger.DEBUG) if parse_result.air_by_date: season = -1 episodes = [parse_result.air_date] else: season = parse_result.season_number episodes = parse_result.episode_numbers to_return = (None, season, episodes, None) # do a scene reverse-lookup to get a list of all possible names name_list = show_name_helpers.sceneToNormalShowNames(parse_result.series_name) if not name_list: return (None, season, episodes, None) # try finding name in DB for cur_name in name_list: self._log(u"Looking up " + cur_name + u" in the DB", logger.DEBUG) db_result = helpers.searchDBForShow(cur_name) if db_result: self._log(u"Lookup successful, using tvdb id " + str(db_result[0]), logger.DEBUG) self._finalize(parse_result) return (int(db_result[0]), season, episodes, None) # try finding name in scene exceptions for cur_name in name_list: self._log(u"Checking scene exceptions for a match on " + cur_name, logger.DEBUG) scene_id = scene_exceptions.get_scene_exception_by_name(cur_name) if scene_id: self._log(u"Scene exception lookup got tvdb id " + str(scene_id) + u", using that", logger.DEBUG) self._finalize(parse_result) return (scene_id, season, episodes, None) # try finding name on TVDB for cur_name in name_list: try: t = tvdb_api.Tvdb(custom_ui=classes.ShowListUI, **sickbeard.TVDB_API_PARMS) self._log(u"Looking up name " + cur_name + u" on TVDB", logger.DEBUG) showObj = t[cur_name] except (tvdb_exceptions.tvdb_exception): # if none found, search on all languages try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = 
sickbeard.TVDB_API_PARMS.copy() ltvdb_api_parms['search_all_languages'] = True t = tvdb_api.Tvdb(custom_ui=classes.ShowListUI, **ltvdb_api_parms) self._log(u"Looking up name " + cur_name + u" in all languages on TVDB", logger.DEBUG) showObj = t[cur_name] except (tvdb_exceptions.tvdb_exception, IOError): pass continue except (IOError): continue self._log(u"Lookup successful, using tvdb id " + str(showObj["id"]), logger.DEBUG) self._finalize(parse_result) return (int(showObj["id"]), season, episodes, None) self._finalize(parse_result) return to_return def _finalize(self, parse_result): self.release_group = parse_result.release_group # remember whether it's a proper self.is_proper = parse_result.is_proper # if the result is complete then set release name if parse_result.series_name and ((parse_result.season_number is not None and parse_result.episode_numbers) or parse_result.air_date ) and parse_result.release_group: if not self.release_name: self.release_name = helpers.remove_extension(ek.ek(os.path.basename, parse_result.original_name)) else: logger.log(u"Parse result not sufficient (all following have to be set). will not save release name", logger.DEBUG) logger.log(u"Parse result(series_name): " + str(parse_result.series_name), logger.DEBUG) logger.log(u"Parse result(season_number): " + str(parse_result.season_number), logger.DEBUG) logger.log(u"Parse result(episode_numbers): " + str(parse_result.episode_numbers), logger.DEBUG) logger.log(u" or Parse result(air_date): " + str(parse_result.air_date), logger.DEBUG) logger.log(u"Parse result(release_group): " + str(parse_result.release_group), logger.DEBUG) def _find_info(self): """ For a given file try to find the showid, season, and episode. 
""" tvdb_id = season = quality = None episodes = [] # try to look up the nzb in history attempt_list = [self._history_lookup, # try to analyze the nzb name lambda: self._analyze_name(self.nzb_name), # try to analyze the file name lambda: self._analyze_name(self.file_name), # try to analyze the dir name lambda: self._analyze_name(self.folder_name), # try to analyze the file + dir names together lambda: self._analyze_name(self.file_path), # try to analyze the dir + file name together as one name lambda: self._analyze_name(self.folder_name + u' ' + self.file_name) ] # attempt every possible method to get our info for cur_attempt in attempt_list: try: (cur_tvdb_id, cur_season, cur_episodes, cur_quality) = cur_attempt() except InvalidNameException, e: logger.log(u"Unable to parse, skipping: " + ex(e), logger.DEBUG) continue # if we already did a successful history lookup then keep that tvdb_id value if cur_tvdb_id and not (self.in_history and tvdb_id): tvdb_id = cur_tvdb_id if cur_quality and not (self.in_history and quality): quality = cur_quality if cur_season is not None: season = cur_season if cur_episodes: episodes = cur_episodes # for air-by-date shows we need to look up the season/episode from database if season == -1 and tvdb_id and episodes: self._log(u"Looks like this is an air-by-date show, attempting to convert the date to season/episode", logger.DEBUG) airdate = episodes[0].toordinal() myDB = db.DBConnection() sql_result = myDB.select("SELECT season, episode FROM tv_episodes WHERE showid = ? 
and airdate = ?", [tvdb_id, airdate]) if sql_result: season = int(sql_result[0][0]) episodes = [int(sql_result[0][1])] else: self._log(u"Unable to find episode with date " + str(episodes[0]) + u" for show " + str(tvdb_id) + u", skipping", logger.DEBUG) # we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers episodes = [] continue # if there's no season then we can hopefully just use 1 automatically elif season is None and tvdb_id: myDB = db.DBConnection() numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [tvdb_id]) if int(numseasonsSQlResult[0][0]) == 1 and season is None: self._log(u"Don't have a season number, but this show appears to only have 1 season, setting seasonnumber to 1...", logger.DEBUG) season = 1 if tvdb_id and season is not None and episodes: return (tvdb_id, season, episodes, quality) return (tvdb_id, season, episodes, quality) def _get_ep_obj(self, tvdb_id, season, episodes): """ Retrieve the TVEpisode object requested. tvdb_id: The TVDBID of the show (int) season: The season of the episode (int) episodes: A list of episodes to find (list of ints) If the episode(s) can be found then a TVEpisode object with the correct related eps will be instantiated and returned. If the episode can't be found then None will be returned. 
""" show_obj = None self._log(u"Loading show object for tvdb_id " + str(tvdb_id), logger.DEBUG) # find the show in the showlist try: show_obj = helpers.findCertainShow(sickbeard.showList, tvdb_id) except exceptions.MultipleShowObjectsException: raise # TODO: later I'll just log this, for now I want to know about it ASAP # if we can't find the show then there's nothing we can really do if not show_obj: error_msg = u"This show isn't in your list, you need to add it to SB before post-processing an episode" self._log(error_msg, logger.ERROR) raise exceptions.PostProcessingFailed(error_msg) root_ep = None for cur_episode in episodes: episode = int(cur_episode) self._log(u"Retrieving episode object for " + str(season) + "x" + str(episode), logger.DEBUG) # now that we've figured out which episode this file is just load it manually try: curEp = show_obj.getEpisode(season, episode) except exceptions.EpisodeNotFoundException, e: error_msg = u"Unable to create episode: " + ex(e) self._log(error_msg, logger.DEBUG) raise exceptions.PostProcessingFailed(error_msg) # associate all the episodes together under a single root episode if root_ep is None: root_ep = curEp root_ep.relatedEps = [] elif curEp not in root_ep.relatedEps: root_ep.relatedEps.append(curEp) return root_ep def _get_quality(self, ep_obj): """ Determines the quality of the file that is being post processed by parsing through the data available. 
ep_obj: The TVEpisode object related to the file we are post processing Returns: A quality value found in common.Quality """ ep_quality = common.Quality.UNKNOWN # nzb name is the most reliable if it exists, followed by folder name and lastly file name name_list = [self.nzb_name, self.folder_name, self.file_name] # search all possible names for our new quality, in case the file or dir doesn't have it for cur_name in name_list: # some stuff might be None at this point still if not cur_name: continue ep_quality = common.Quality.nameQuality(cur_name) self._log(u"Looking up quality for name " + cur_name + u", got " + common.Quality.qualityStrings[ep_quality], logger.DEBUG) # if we find a good one then use it if ep_quality != common.Quality.UNKNOWN: logger.log(cur_name + u" looks like it has quality " + common.Quality.qualityStrings[ep_quality] + ", using that", logger.DEBUG) return ep_quality # Try getting quality from the episode (snatched) status if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER: oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) # @UnusedVariable if ep_quality != common.Quality.UNKNOWN: self._log(u"The old status had a quality in it, using that: " + common.Quality.qualityStrings[ep_quality], logger.DEBUG) return ep_quality # Try guessing quality from the file name ep_quality = common.Quality.assumeQuality(self.file_name) self._log(u"Guessing quality for name " + self.file_name + u", got " + common.Quality.qualityStrings[ep_quality], logger.DEBUG) if ep_quality != common.Quality.UNKNOWN: logger.log(self.file_name + u" looks like it has quality " + common.Quality.qualityStrings[ep_quality] + ", using that", logger.DEBUG) return ep_quality return ep_quality def _run_extra_scripts(self, ep_obj): """ Executes any extra scripts defined in the config. 
ep_obj: The object to use when calling the extra script """ for curScriptName in sickbeard.EXTRA_SCRIPTS: # generate a safe command line string to execute the script and provide all the parameters try: script_cmd = [piece for piece in re.split("( |\\\".*?\\\"|'.*?')", curScriptName) if piece.strip()] script_cmd = script_cmd + [ep_obj.location.encode(sickbeard.SYS_ENCODING), self.file_path.encode(sickbeard.SYS_ENCODING), str(ep_obj.show.tvdbid), str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate) ] # use subprocess to run the command and capture output self._log(u"Executing command " + str(script_cmd)) p = subprocess.Popen(script_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR) out, err = p.communicate() # @UnusedVariable self._log(u"Script result: " + str(out), logger.DEBUG) except Exception, e: self._log(u"Unable to run extra_script: " + ex(e)) def _safe_replace(self, ep_obj, new_ep_quality): """ Determines if the new episode can safely replace old episode. Episodes which are expected (snatched) or larger than the existing episode are priority, others are not. ep_obj: The TVEpisode object in question new_ep_quality: The quality of the episode that is being processed Returns: True if the episode can safely replace old episode, False otherwise. 
""" # if SB snatched this then assume it's safe if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER: self._log(u"Sick Beard snatched this episode, marking it safe to replace", logger.DEBUG) return True old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) # if old episode is not downloaded/archived then it's safe if old_ep_status != common.DOWNLOADED and old_ep_status != common.ARCHIVED: self._log(u"Existing episode status is not downloaded/archived, marking it safe to replace", logger.DEBUG) return True if old_ep_status == common.ARCHIVED: self._log(u"Existing episode status is archived, marking it unsafe to replace", logger.DEBUG) return False # Status downloaded. Quality/ size checks # if manual post process option is set to force_replace then it's safe if self.force_replace: self._log(u"Processed episode is set to force replace existing episode, marking it safe to replace", logger.DEBUG) return True # if the file processed is higher quality than the existing episode then it's safe if new_ep_quality > old_ep_quality: if new_ep_quality != common.Quality.UNKNOWN: self._log(u"Existing episode status is not snatched but processed episode appears to be better quality than existing episode, marking it safe to replace", logger.DEBUG) return True else: self._log(u"Episode already exists in database and processed episode has unknown quality, marking it unsafe to replace", logger.DEBUG) return False # if there's an existing downloaded file with same quality, check filesize to decide if new_ep_quality == old_ep_quality: self._log(u"Episode already exists in database and has same quality as processed episode", logger.DEBUG) # check for an existing file self._log(u"Checking size of existing file: " + ep_obj.location, logger.DEBUG) existing_file_status = self._checkForExistingFile(ep_obj.location) if existing_file_status == PostProcessor.EXISTS_LARGER: self._log(u"File exists and new file is smaller, marking it unsafe to 
replace", logger.DEBUG) return False elif existing_file_status == PostProcessor.EXISTS_SAME: self._log(u"File exists and new file is same size, marking it unsafe to replace", logger.DEBUG) return False elif existing_file_status == PostProcessor.EXISTS_SMALLER: self._log(u"File exists and new file is larger, marking it safe to replace", logger.DEBUG) return True elif existing_file_status == PostProcessor.DOESNT_EXIST: if not ek.ek(os.path.isdir, ep_obj.show._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS: self._log(u"File and Show location doesn't exist, marking it unsafe to replace", logger.DEBUG) return False else: self._log(u"File doesn't exist, marking it safe to replace", logger.DEBUG) return True else: self._log(u"Unknown file status for: " + ep_obj.location + "This should never happen, please log this as a bug.", logger.ERROR) return False # if there's an existing file with better quality if new_ep_quality < old_ep_quality and old_ep_quality != common.Quality.UNKNOWN: self._log(u"Episode already exists in database and processed episode has lower quality, marking it unsafe to replace", logger.DEBUG) return False self._log(u"None of the conditions were met, marking it unsafe to replace", logger.DEBUG) return False def process(self): """ Post-process a given file """ self._log(u"Processing " + self.file_path + " (" + str(self.nzb_name) + ")") if ek.ek(os.path.isdir, self.file_path): self._log(u"File " + self.file_path + " seems to be a directory") return False # reset per-file stuff self.in_history = False # try to find the file info (tvdb_id, season, episodes, quality) = self._find_info() # if we don't have it then give up if not tvdb_id or season is None or not episodes: self._log(u"Not enough information to determine what episode this is", logger.DEBUG) self._log(u"Quitting post-processing", logger.DEBUG) return False # retrieve/create the corresponding TVEpisode objects ep_obj = self._get_ep_obj(tvdb_id, season, episodes) # get the quality of the 
episode we're processing if quality: self._log(u"Snatch history had a quality in it, using that: " + common.Quality.qualityStrings[quality], logger.DEBUG) new_ep_quality = quality else: new_ep_quality = self._get_quality(ep_obj) logger.log(u"Quality of the processing episode: " + str(new_ep_quality), logger.DEBUG) # see if it's safe to replace existing episode (is download snatched, PROPER, better quality) safe_replace = self._safe_replace(ep_obj, new_ep_quality) # if it's not safe to replace, stop here if not safe_replace: self._log(u"Quitting post-processing", logger.DEBUG) return False # if the file is safe to replace then we're going to replace it even if it exists else: self._log(u"This download is marked as safe to replace existing file", logger.DEBUG) # delete the existing file (and company) for cur_ep in [ep_obj] + ep_obj.relatedEps: try: self._delete(cur_ep.location, associated_files=True) # clean up any left over folders if cur_ep.location: helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep.location), keep_dir=ep_obj.show._location) except (OSError, IOError): raise exceptions.PostProcessingFailed(u"Unable to delete the existing files") # if the show directory doesn't exist then make it if allowed if not ek.ek(os.path.isdir, ep_obj.show._location) and sickbeard.CREATE_MISSING_SHOW_DIRS: self._log(u"Show directory doesn't exist, creating it", logger.DEBUG) try: ek.ek(os.mkdir, ep_obj.show._location) # do the library update for synoindex notifiers.synoindex_notifier.addFolder(ep_obj.show._location) except (OSError, IOError): raise exceptions.PostProcessingFailed(u"Unable to create the show directory: " + ep_obj.show._location) # get metadata for the show (but not episode because it hasn't been fully processed) ep_obj.show.writeMetadata(True) # update the ep info before we rename so the quality & release name go into the name properly for cur_ep in [ep_obj] + ep_obj.relatedEps: if self.release_name: self._log(u"Found release name " + 
self.release_name, logger.DEBUG) cur_ep.release_name = self.release_name else: cur_ep.release_name = "" cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality) # find the destination folder try: proper_path = ep_obj.proper_path() proper_absolute_path = ek.ek(os.path.join, ep_obj.show.location, proper_path) dest_path = ek.ek(os.path.dirname, proper_absolute_path) except exceptions.ShowDirNotFoundException: raise exceptions.PostProcessingFailed(u"Unable to post-process an episode if the show dir doesn't exist, quitting") self._log(u"Destination folder for this episode: " + dest_path, logger.DEBUG) # create any folders we need if not helpers.make_dirs(dest_path): raise exceptions.PostProcessingFailed(u"Unable to create destination folder: " + dest_path) # figure out the base name of the resulting episode file if sickbeard.RENAME_EPISODES: orig_extension = self.file_name.rpartition('.')[-1] new_base_name = ek.ek(os.path.basename, proper_path) new_file_name = new_base_name + '.' 
+ orig_extension else: # if we're not renaming then there's no new base name, we'll just use the existing name new_base_name = None new_file_name = self.file_name try: # move the episode and associated files to the show dir if sickbeard.KEEP_PROCESSED_DIR: self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES) else: self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES) except (OSError, IOError): raise exceptions.PostProcessingFailed(u"Unable to move the files to destination folder: " + dest_path) # put the new location in the database for cur_ep in [ep_obj] + ep_obj.relatedEps: with cur_ep.lock: cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name) cur_ep.saveToDB() # log it to history history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group) # send notifiers download notification if not ep_obj.show.skip_notices: notifiers.notify_download(ep_obj.prettyName()) # generate nfo/tbn ep_obj.createMetaFiles() ep_obj.saveToDB() # send notifiers library update notifiers.update_library(ep_obj) self._run_extra_scripts(ep_obj) return True
37,843
Python
.py
647
44.965997
185
0.609791
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,307
browser.py
midgetspy_Sick-Beard/sickbeard/browser.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import os
import string

import cherrypy

from sickbeard import encodingKludge as ek

# use the built-in if it's available (python 2.6), if not use the included library
try:
    import json
except ImportError:
    from lib import simplejson as json

# this is for the drive letter code, it only works on windows
if os.name == 'nt':
    from ctypes import windll

    # adapted from http://stackoverflow.com/questions/827371/is-there-a-way-to-list-all-the-available-drive-letters-in-python/827490
    def getWinDrives():
        """ Return list of detected drives """
        assert os.name == 'nt'

        drives = []
        bitmask = windll.kernel32.GetLogicalDrives()  #@UndefinedVariable
        for letter in string.uppercase:
            if bitmask & 1:
                drives.append(letter)
            bitmask >>= 1

        return drives


def foldersAtPath(path, includeParent=False):
    """ Returns a list of dictionaries with the folders contained at the given path
        Give the empty string as the path to list the contents of the root path
        under Unix this means "/", on Windows this will be a list of drive letters)
    """

    # walk up the tree until we find a valid path
    while path and not os.path.isdir(path):
        if path == os.path.dirname(path):
            path = ''
            break
        else:
            path = os.path.dirname(path)

    if path == "":
        if os.name == 'nt':
            entries = [{'current_path': 'Root'}]
            for letter in getWinDrives():
                letterPath = letter + ':\\'
                entries.append({'name': letterPath, 'path': letterPath})
            return entries
        else:
            path = '/'

    # fix up the path and find the parent
    path = os.path.abspath(os.path.normpath(path))
    parentPath = os.path.dirname(path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parentPath and os.name == 'nt':
        parentPath = ""

    fileList = [{'name': filename, 'path': ek.ek(os.path.join, path, filename)} for filename in ek.ek(os.listdir, path)]
    fileList = filter(lambda entry: ek.ek(os.path.isdir, entry['path']), fileList)

    # prune out directories to protect the user from doing stupid things (already lower case the dir to reduce calls)
    hideList = ["boot", "bootmgr", "cache", "msocache", "recovery", "$recycle.bin", "recycler", "system volume information", "temporary internet files"]  # windows specific
    hideList += [".fseventd", ".spotlight", ".trashes", ".vol", "cachedmessages", "caches", "trash"]  # osx specific
    fileList = filter(lambda entry: entry['name'].lower() not in hideList, fileList)

    # sort case-insensitively on the displayed folder name
    # BUGFIX: the comparator used to compare x['name'] against y['path'], which
    # made the ordering inconsistent; both sides now use the entry name
    fileList = sorted(fileList, lambda x, y: cmp(os.path.basename(x['name']).lower(), os.path.basename(y['name']).lower()))

    entries = [{'current_path': path}]
    if includeParent and parentPath != path:
        entries.append({'name': "..", 'path': parentPath})
    entries.extend(fileList)

    return entries


class WebFileBrowser:
    """cherrypy handler exposing the folder listing as JSON endpoints."""

    @cherrypy.expose
    def index(self, path=''):
        # full listing (including the parent entry) for the browse dialog
        cherrypy.response.headers['Content-Type'] = "application/json"
        return json.dumps(foldersAtPath(path, True))

    @cherrypy.expose
    def complete(self, term):
        # autocomplete helper: only the candidate paths for the typed prefix
        cherrypy.response.headers['Content-Type'] = "application/json"
        paths = [entry['path'] for entry in foldersAtPath(os.path.dirname(term)) if 'path' in entry]
        return json.dumps(paths)
4,179
Python
.py
89
41.393258
171
0.680924
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,308
gh_api.py
midgetspy_Sick-Beard/sickbeard/gh_api.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

try:
    import json
except ImportError:
    from lib import simplejson as json

import helpers


class GitHub(object):
    """
    Simple api wrapper for the Github API v3. Currently only supports the small
    thing that SB needs it for - list of commits.
    """

    def __init__(self, github_repo_user, github_repo, branch='master'):
        # user/repo/branch are fixed at construction and used by every request
        self.github_repo_user = github_repo_user
        self.github_repo = github_repo
        self.branch = branch

    def _access_API(self, path, params=None):
        """
        Access the API at the path given and with the optional params given.

        path: A list of the path elements to use (eg. ['repos', 'midgetspy', 'Sick-Beard', 'commits'])
        params: Optional dict of name/value pairs for extra params to send. (eg. {'per_page': 10})

        Returns a deserialized json object of the result. Doesn't do any error checking (hope it works).
        """

        url = 'https://api.github.com/' + '/'.join(path)

        # isinstance instead of an exact type() check so dict subclasses work too
        if params and isinstance(params, dict):
            url += '?' + '&'.join([str(x) + '=' + str(params[x]) for x in params.keys()])

        data = helpers.getURL(url)

        if data:
            json_data = json.loads(data)
            return json_data
        else:
            # no data came back (connection trouble) - degrade to an empty list
            return []

    def commits(self):
        """
        Uses the API to get a list of the 100 most recent commits from the
        specified user/repo/branch (as given to the constructor), starting from HEAD.

        Returns a deserialized json object containing the commit info. See http://developer.github.com/v3/repos/commits/
        """
        access_API = self._access_API(['repos', self.github_repo_user, self.github_repo, 'commits'],
                                      params={'per_page': 100, 'sha': self.branch})
        return access_API

    def compare(self, base, head, per_page=1):
        """
        Uses the API to get a list of compares between base and head for the
        user/repo given to the constructor.

        base: Start compare from branch
        head: Current commit sha or branch name to compare
        per_page: number of items per page

        Returns a deserialized json object containing the compare info. See http://developer.github.com/v3/repos/commits/
        """
        access_API = self._access_API(['repos', self.github_repo_user, self.github_repo, 'compare', base + '...' + head],
                                      params={'per_page': per_page})
        return access_API
3,445
Python
.py
69
42.15942
153
0.657579
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,309
properFinder.py
midgetspy_Sick-Beard/sickbeard/properFinder.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import datetime
import operator

import sickbeard

from sickbeard import db
from sickbeard import exceptions
from sickbeard.exceptions import ex
from sickbeard import helpers, logger, show_name_helpers
from sickbeard import providers
from sickbeard import search
from sickbeard import history
from sickbeard.common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, Quality

from lib.tvdb_api import tvdb_api, tvdb_exceptions

from name_parser.parser import NameParser, InvalidNameException


class ProperFinder():
    """Periodic task that looks for PROPER/REPACK releases of episodes we
    already downloaded and snatches them when they match the original quality."""

    def run(self):
        """Entry point: search all providers and download any wanted propers.

        No-op when the user has disabled proper downloading in the config.
        """
        if not sickbeard.DOWNLOAD_PROPERS:
            return

        logger.log(u"Beginning the search for new propers")

        propers = self._getProperList()

        self._downloadPropers(propers)

    def _getProperList(self):
        """Build and return the list of proper results worth downloading.

        Collects propers from all active providers, de-duplicates them,
        matches each one to a known show/episode, and filters out anything
        that doesn't match the quality of the episode we already have.
        """
        propers = {}

        # for each provider get a list of the propers
        for curProvider in providers.sortedProviderList():

            if not curProvider.isActive():
                continue

            # providers are only asked for propers from the last two days
            search_date = datetime.datetime.today() - datetime.timedelta(days=2)

            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)
            try:
                curPropers = curProvider.findPropers(search_date)
            except exceptions.AuthException, e:
                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
                continue

            # if they haven't been added by a different provider than add the proper to the list
            for x in curPropers:
                name = self._genericName(x.name)

                if not name in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

        # take the list of unique propers and get it sorted by date, newest first
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)

        finalPropers = []

        for curProper in sortedPropers:

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(curProper.name)
            except InvalidNameException:
                logger.log(u"Unable to parse the filename " + curProper.name + " into a valid episode", logger.DEBUG)
                continue

            if not parse_result.episode_numbers:
                logger.log(u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode", logger.DEBUG)
                continue

            # populate our Proper instance
            if parse_result.air_by_date:
                # air-by-date: season -1 + air date is resolved to real numbers below
                curProper.season = -1
                curProper.episode = parse_result.air_date
            else:
                curProper.season = parse_result.season_number if parse_result.season_number != None else 1
                curProper.episode = parse_result.episode_numbers[0]
            curProper.quality = Quality.nameQuality(curProper.name)

            # for each show in our list
            for curShow in sickbeard.showList:

                if not parse_result.series_name:
                    continue

                genericName = self._genericName(parse_result.series_name)

                # get the scene name masks
                sceneNames = set(show_name_helpers.makeSceneShowSearchStrings(curShow))

                # for each scene name mask
                for curSceneName in sceneNames:

                    # if it matches
                    if genericName == self._genericName(curSceneName):
                        logger.log(u"Successful match! Result " + parse_result.series_name + " matched to show " + curShow.name, logger.DEBUG)

                        # set the tvdbid in the db to the show's tvdbid
                        curProper.tvdbid = curShow.tvdbid

                        # since we found it, break out
                        break

                # if we found something in the inner for loop break out of this one
                if curProper.tvdbid != -1:
                    break

            # no show matched this proper - not one of ours, skip it
            if curProper.tvdbid == -1:
                continue

            if not show_name_helpers.filterBadReleases(curProper.name):
                logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it", logger.DEBUG)
                continue

            show = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
            if not show:
                logger.log(u"Unable to find the show with tvdbid " + str(curProper.tvdbid), logger.ERROR)
                continue

            # honour the show's per-show ignored/required word filters
            if show.rls_ignore_words and search.filter_release_name(curProper.name, show.rls_ignore_words):
                logger.log(u"Ignoring " + curProper.name + " based on ignored words filter: " + show.rls_ignore_words, logger.MESSAGE)
                continue

            if show.rls_require_words and not search.filter_release_name(curProper.name, show.rls_require_words):
                logger.log(u"Ignoring " + curProper.name + " based on required words filter: " + show.rls_require_words, logger.MESSAGE)
                continue

            # if we have an air-by-date show then get the real season/episode numbers
            if curProper.season == -1 and curProper.tvdbid:

                tvdb_lang = show.lang
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if tvdb_lang and not tvdb_lang == 'en':
                    ltvdb_api_parms['language'] = tvdb_lang

                try:
                    t = tvdb_api.Tvdb(**ltvdb_api_parms)
                    epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
                    curProper.season = int(epObj["seasonnumber"])
                    curProper.episodes = [int(epObj["episodenumber"])]
                except tvdb_exceptions.tvdb_episodenotfound:
                    logger.log(u"Unable to find episode with date " + str(curProper.episode) + " for show " + parse_result.series_name + ", skipping", logger.WARNING)
                    continue

            # check if we actually want this proper (if it's the right quality)
            sqlResults = db.DBConnection().select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [curProper.tvdbid, curProper.season, curProper.episode])
            if not sqlResults:
                continue
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sqlResults[0]["status"]))

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != curProper.quality:
                continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season, curProper.episode) not in map(operator.attrgetter('tvdbid', 'season', 'episode'), finalPropers):
                logger.log(u"Found a proper that we need: " + str(curProper.name))
                finalPropers.append(curProper)

        return finalPropers

    def _downloadPropers(self, properList):
        """Snatch each proper in properList, unless history shows we already
        grabbed it (or never downloaded the episode in the first place)."""

        for curProper in properList:

            # only consider snatches from the last 30 days when matching history
            historyLimit = datetime.datetime.today() - datetime.timedelta(days=30)

            # make sure the episode has been downloaded before
            myDB = db.DBConnection()
            historyResults = myDB.select(
                "SELECT resource FROM history "
                "WHERE showid = ? AND season = ? AND episode = ? AND quality = ? AND date >= ? "
                "AND action IN (" + ",".join([str(x) for x in Quality.SNATCHED]) + ")",
                [curProper.tvdbid, curProper.season, curProper.episode, curProper.quality, historyLimit.strftime(history.dateFormat)])

            # if we didn't download this episode in the first place we don't know what quality to use for the proper so we can't do it
            if len(historyResults) == 0:
                logger.log(u"Unable to find an original history entry for proper " + curProper.name + " so I'm not downloading it.")
                continue

            else:

                # make sure that none of the existing history downloads are the same proper we're trying to download
                clean_proper_name = self._genericName(helpers.remove_non_release_groups(curProper.name))
                isSame = False
                for curResult in historyResults:
                    # if the result exists in history already we need to skip it
                    if self._genericName(helpers.remove_non_release_groups(curResult["resource"])) == clean_proper_name:
                        isSame = True
                        break
                if isSame:
                    logger.log(u"This proper is already in history, skipping it", logger.DEBUG)
                    continue

                # get the episode object
                showObj = helpers.findCertainShow(sickbeard.showList, curProper.tvdbid)
                if showObj == None:
                    logger.log(u"Unable to find the show with tvdbid " + str(curProper.tvdbid) + " so unable to download the proper", logger.ERROR)
                    continue
                epObj = showObj.getEpisode(curProper.season, curProper.episode)

                # make the result object
                result = curProper.provider.getResult([epObj])
                result.url = curProper.url
                result.name = curProper.name
                result.quality = curProper.quality

                # snatch it
                search.snatchEpisode(result, SNATCHED_PROPER)

    def _genericName(self, name):
        """Normalize a release/show name for comparison: separators to spaces, lowercase."""
        return name.replace(".", " ").replace("-", " ").replace("_", " ").lower()
10,971
Python
.py
183
44.989071
189
0.60733
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,310
scene_exceptions.py
midgetspy_Sick-Beard/sickbeard/scene_exceptions.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import re

from sickbeard import helpers
from sickbeard import name_cache
from sickbeard import logger
from sickbeard import db


def get_scene_exceptions(tvdb_id):
    """
    Given a tvdb_id, return a list of all the scene exceptions.
    """
    myDB = db.DBConnection("cache.db")
    exceptions = myDB.select("SELECT DISTINCT show_name FROM scene_exceptions WHERE tvdb_id = ?", [tvdb_id])
    return [cur_exception["show_name"] for cur_exception in exceptions]


def get_scene_exception_by_name(show_name):
    """
    Given a show name, return the tvdbid of the exception, None if no exception
    is present.
    """
    myDB = db.DBConnection("cache.db")

    # try the obvious case first: an exact (case-insensitive) name match
    exception_result = myDB.select("SELECT tvdb_id FROM scene_exceptions WHERE LOWER(show_name) = ?", [show_name.lower()])
    if exception_result:
        return int(exception_result[0]["tvdb_id"])

    # fall back to comparing against sanitized versions of every known exception
    all_exception_results = myDB.select("SELECT DISTINCT show_name, tvdb_id FROM scene_exceptions")
    for cur_exception in all_exception_results:

        cur_exception_name = cur_exception["show_name"]
        cur_tvdb_id = int(cur_exception["tvdb_id"])

        if show_name.lower() in (cur_exception_name.lower(), helpers.sanitizeSceneName(cur_exception_name).lower().replace('.', ' ')):
            logger.log(u"Scene exception lookup got tvdb id " + str(cur_tvdb_id) + u", using that", logger.DEBUG)
            return cur_tvdb_id

    return None


def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them
    into the scene_exceptions table in cache.db. Also clears the scene name cache.

    Returns False when the remote exception list could not be fetched, True otherwise.
    """
    # tag every row we own so other exception providers could coexist in the table
    provider = 'sb_tvdb_scene_exceptions'
    remote_exception_dict = {}
    local_exception_dict = {}
    query_list = []

    # remote exceptions are stored on github pages
    url = 'http://midgetspy.github.io/sb_tvdb_scene_exceptions/exceptions.txt'

    logger.log(u"Check scene exceptions update")

    # get remote exceptions
    url_data = helpers.getURL(url)

    if not url_data:
        # when url_data is None, trouble connecting to github
        logger.log(u"Check scene exceptions update failed. Unable to get URL: " + url, logger.ERROR)
        return False

    else:
        # each exception is on one line with the format tvdb_id: 'show name 1', 'show name 2', etc
        for cur_line in url_data.splitlines():
            cur_line = cur_line.decode('utf-8')
            tvdb_id, sep, aliases = cur_line.partition(':')  # @UnusedVariable

            if not aliases:
                continue

            cur_tvdb_id = int(tvdb_id)

            # regex out the list of shows, taking \' into account
            alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]

            remote_exception_dict[cur_tvdb_id] = alias_list

        # get local exceptions
        myDB = db.DBConnection("cache.db", row_type="dict")
        sql_result = myDB.select("SELECT tvdb_id, show_name FROM scene_exceptions WHERE provider=?;", [provider])
        for cur_result in sql_result:
            cur_tvdb_id = cur_result["tvdb_id"]
            if cur_tvdb_id not in local_exception_dict:
                local_exception_dict[cur_tvdb_id] = []
            local_exception_dict[cur_tvdb_id].append(cur_result["show_name"])

        # check remote against local for added exceptions
        for cur_tvdb_id in remote_exception_dict:
            if cur_tvdb_id not in local_exception_dict:
                local_exception_dict[cur_tvdb_id] = []

            for cur_exception_name in remote_exception_dict[cur_tvdb_id]:
                if cur_exception_name not in local_exception_dict[cur_tvdb_id]:
                    query_list.append(["INSERT INTO scene_exceptions (tvdb_id,show_name,provider) VALUES (?,?,?);", [cur_tvdb_id, cur_exception_name, provider]])

        # check local against remote for removed exceptions
        for cur_tvdb_id in local_exception_dict:
            if cur_tvdb_id not in remote_exception_dict:
                # show disappeared from the remote list entirely - drop all its rows
                query_list.append(["DELETE FROM scene_exceptions WHERE tvdb_id=? AND provider=?;", [cur_tvdb_id, provider]])

            else:
                for cur_exception_name in local_exception_dict[cur_tvdb_id]:
                    if cur_exception_name not in remote_exception_dict[cur_tvdb_id]:
                        query_list.append(["DELETE FROM scene_exceptions WHERE tvdb_id= ? AND show_name=? AND provider=?;", [cur_tvdb_id, cur_exception_name, provider]])

        if query_list:
            logger.log(u"Updating scene exceptions")
            myDB.mass_action(query_list, logTransaction=True)

            # exceptions changed, so cached name lookups may now be stale
            logger.log(u"Clear name cache")
            name_cache.clearCache()

            logger.log(u"Performing a vacuum on database: " + myDB.filename)
            myDB.action("VACUUM")
        else:
            logger.log(u"No scene exceptions update needed")

        return True
5,828
Python
.py
109
44.201835
170
0.649903
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,311
history.py
midgetspy_Sick-Beard/sickbeard/history.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import db
import datetime

from sickbeard.common import SNATCHED, Quality

# timestamp format stored in the history table's date column
dateFormat = "%Y%m%d%H%M%S"


def _logHistoryItem(action, showid, season, episode, quality, resource, provider, source):
    """Insert one row into the history table, stamped with the current time."""
    stamp = datetime.datetime.today().strftime(dateFormat)

    connection = db.DBConnection()
    connection.action("INSERT INTO history (action, date, showid, season, episode, quality, resource, provider, source) VALUES (?,?,?,?,?,?,?,?,?)",
                      [action, stamp, showid, season, episode, quality, resource, provider, source])


def logSnatch(searchResult):
    """Record a SNATCHED history row for every episode covered by searchResult."""
    providerClass = searchResult.provider
    if providerClass is not None:
        provider_name = providerClass.name
        source_type = providerClass.providerType
    else:
        # result came from somewhere we can't identify
        provider_name = "unknown"
        source_type = "unknown"

    # composite status encodes both the SNATCHED action and the quality
    composite_action = Quality.compositeStatus(SNATCHED, searchResult.quality)

    for curEpObj in searchResult.episodes:
        _logHistoryItem(composite_action,
                        int(curEpObj.show.tvdbid),
                        int(curEpObj.season),
                        int(curEpObj.episode),
                        searchResult.quality,
                        searchResult.name,
                        provider_name,
                        source_type)


def logDownload(episode, filename, new_ep_quality, release_group=None, source=""):
    """Record a history row for a completed download of the given episode.

    The release group stands in for the provider when it is known; -1 otherwise.
    """
    # store the release group as the provider if possible
    provider = release_group if release_group else -1

    _logHistoryItem(episode.status,
                    int(episode.show.tvdbid),
                    int(episode.season),
                    int(episode.episode),
                    new_ep_quality,
                    filename,
                    provider,
                    source)
2,544
Python
.py
54
40.333333
143
0.700082
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,312
search.py
midgetspy_Sick-Beard/sickbeard/search.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import datetime import os import traceback import re import sickbeard from common import SNATCHED, WANTED, Quality, SEASON_RESULT, MULTI_EP_RESULT from sickbeard import logger, db, show_name_helpers, exceptions, helpers from sickbeard import sab from sickbeard import nzbget from sickbeard import history from sickbeard import notifiers from sickbeard import nzbSplitter from sickbeard import ui from sickbeard import encodingKludge as ek from sickbeard import providers from sickbeard.exceptions import ex from sickbeard.providers.generic import GenericProvider def _downloadResult(result): """ Downloads a result to the appropriate black hole folder. Returns a bool representing success. result: SearchResult instance to download. 
""" resProvider = result.provider newResult = False if resProvider is None: logger.log(u"Invalid provider name - this is a coding error, report it please", logger.ERROR) return False # nzbs with an URL can just be downloaded from the provider if result.resultType == "nzb": newResult = resProvider.downloadResult(result) # if it's an nzb data result elif result.resultType == "nzbdata": # get the final file path to the nzb fileName = ek.ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb") logger.log(u"Saving NZB to " + fileName) newResult = True # save the data to disk try: with ek.ek(open, fileName, 'w') as fileOut: fileOut.write(result.extraInfo[0]) helpers.chmodAsParent(fileName) except EnvironmentError, e: logger.log(u"Error trying to save NZB to black hole: " + ex(e), logger.ERROR) newResult = False elif resProvider.providerType == "torrent": newResult = resProvider.downloadResult(result) else: logger.log(u"Invalid provider type - this is a coding error, report it please", logger.ERROR) return False return newResult def snatchEpisode(result, endStatus=SNATCHED): """ Contains the internal logic necessary to actually "snatch" a result that has been found. Returns a bool representing success. result: SearchResult instance to be snatched. endStatus: the episode status that should be used for the episode object once it's snatched. 
""" # NZBs can be sent straight to downloader or saved to disk if result.resultType in ("nzb", "nzbdata"): if sickbeard.NZB_METHOD == "blackhole": dlResult = _downloadResult(result) elif sickbeard.NZB_METHOD == "sabnzbd": dlResult = sab.sendNZB(result) elif sickbeard.NZB_METHOD == "nzbget": dlResult = nzbget.sendNZB(result) else: logger.log(u"Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR) dlResult = False # torrents are always saved to disk elif result.resultType == "torrent": dlResult = _downloadResult(result) else: logger.log(u"Unknown result type, unable to download it", logger.ERROR) dlResult = False if dlResult == False: return False ui.notifications.message('Episode snatched', result.name) history.logSnatch(result) # don't notify when we re-download an episode for curEpObj in result.episodes: with curEpObj.lock: curEpObj.status = Quality.compositeStatus(endStatus, result.quality) curEpObj.saveToDB() if not curEpObj.show.skip_notices and curEpObj.status not in Quality.DOWNLOADED: notifiers.notify_snatch(curEpObj.prettyName()) return True def searchForNeededEpisodes(): logger.log(u"Searching all providers for any needed episodes") foundResults = {} didSearch = False # ask all providers for any episodes it finds for curProvider in providers.sortedProviderList(): if not curProvider.isActive(): continue curFoundResults = {} try: curFoundResults = curProvider.searchRSS() except exceptions.AuthException, e: logger.log(u"Authentication error: " + ex(e), logger.ERROR) continue except Exception, e: logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) continue didSearch = True # pick a single result for each episode, respecting existing results for curEp in curFoundResults: if curEp.show.paused: logger.log(u"Show " + curEp.show.name + " is paused, ignoring all RSS items for " + curEp.prettyName(), logger.DEBUG) continue # find the best result for 
the current episode bestResult = None for curResult in curFoundResults[curEp]: if not bestResult or bestResult.quality < curResult.quality: bestResult = curResult bestResult = pickBestResult(curFoundResults[curEp], curEp.show) # if all results were rejected move on to the next episode if not bestResult: logger.log(u"All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG) continue # if it's already in the list (from another provider) and the newly found quality is no better then skip it if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality: continue foundResults[curEp] = bestResult if not didSearch: logger.log(u"No NZB/Torrent providers found or enabled in the sickbeard config. Please check your settings.", logger.ERROR) return foundResults.values() def filter_release_name(name, filter_words): """ Filters out results based on filter_words name: name to check filter_words : Words to filter on, separated by comma Returns: False if the release name is OK, True if it contains one of the filter_words """ if filter_words: for test_word in filter_words.split(','): test_word = test_word.strip() if test_word: if re.search('(^|[\W_])' + test_word + '($|[\W_])', name, re.I): logger.log(u"" + name + " contains word: " + test_word, logger.DEBUG) return True return False def pickBestResult(results, show, quality_list=None): logger.log(u"Picking the best result out of " + str([x.name for x in results]), logger.DEBUG) # find the best result for the current episode bestResult = None for cur_result in results: logger.log(u"Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality]) if quality_list and cur_result.quality not in quality_list: logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG) continue if show.rls_ignore_words and filter_release_name(cur_result.name, show.rls_ignore_words): logger.log(u"Ignoring " + cur_result.name + " based on ignored words filter: " 
+ show.rls_ignore_words, logger.MESSAGE) continue if show.rls_require_words and not filter_release_name(cur_result.name, show.rls_require_words): logger.log(u"Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words, logger.MESSAGE) continue if not bestResult or bestResult.quality < cur_result.quality and cur_result.quality != Quality.UNKNOWN: bestResult = cur_result elif bestResult.quality == cur_result.quality: if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower(): bestResult = cur_result elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower(): bestResult = cur_result if bestResult: logger.log(u"Picked " + bestResult.name + " as the best", logger.MESSAGE) else: logger.log(u"No result picked.", logger.DEBUG) return bestResult def isFinalResult(result): """ Checks if the given result is good enough quality that we can stop searching for other ones. If the result is the highest quality in both the any/best quality lists then this function returns True, if not then it's False """ logger.log(u"Checking if we should keep searching after we've found " + result.name, logger.DEBUG) show_obj = result.episodes[0].show any_qualities, best_qualities = Quality.splitQuality(show_obj.quality) # if there is a redownload that's higher than this then we definitely need to keep looking if best_qualities and result.quality < max(best_qualities): return False # if there's no redownload that's higher (above) and this is the highest initial download then we're good elif any_qualities and result.quality == max(any_qualities): return True elif best_qualities and result.quality == max(best_qualities): # if this is the best redownload but we have a higher initial download then keep looking if any_qualities and result.quality < max(any_qualities): return False # if this is the best redownload and we don't have a higher initial download then we're done else: return True # if we got here than it's 
either not on the lists, they're empty, or it's lower than the highest required else: return False def findEpisode(episode, manualSearch=False): logger.log(u"Searching for " + episode.prettyName()) foundResults = [] didSearch = False for curProvider in providers.sortedProviderList(): if not curProvider.isActive(): continue try: curFoundResults = curProvider.findEpisode(episode, manualSearch=manualSearch) except exceptions.AuthException, e: logger.log(u"Authentication error: " + ex(e), logger.ERROR) continue except Exception, e: logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) continue didSearch = True # skip non-tv crap curFoundResults = filter(lambda x: show_name_helpers.filterBadReleases(x.name) and show_name_helpers.isGoodResult(x.name, episode.show), curFoundResults) # loop all results and see if any of them are good enough that we can stop searching done_searching = False for cur_result in curFoundResults: done_searching = isFinalResult(cur_result) logger.log(u"Should we stop searching after finding " + cur_result.name + ": " + str(done_searching), logger.DEBUG) if done_searching: break foundResults += curFoundResults # if we did find a result that's good enough to stop then don't continue if done_searching: break if not didSearch: logger.log(u"No NZB/Torrent providers found or enabled in the sickbeard config. 
Please check your settings.", logger.ERROR) bestResult = pickBestResult(foundResults, episode.show) return bestResult def findSeason(show, season): logger.log(u"Searching for stuff we need from " + show.name + " season " + str(season)) foundResults = {} didSearch = False for curProvider in providers.sortedProviderList(): if not curProvider.isActive(): continue try: curResults = curProvider.findSeasonResults(show, season) # make a list of all the results for this provider for curEp in curResults: # skip non-tv crap curResults[curEp] = filter(lambda x: show_name_helpers.filterBadReleases(x.name) and show_name_helpers.isGoodResult(x.name, show), curResults[curEp]) if curEp in foundResults: foundResults[curEp] += curResults[curEp] else: foundResults[curEp] = curResults[curEp] except exceptions.AuthException, e: logger.log(u"Authentication error: " + ex(e), logger.ERROR) continue except Exception, e: logger.log(u"Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) continue didSearch = True if not didSearch: logger.log(u"No NZB/Torrent providers found or enabled in the sickbeard config. 
Please check your settings.", logger.ERROR) finalResults = [] anyQualities, bestQualities = Quality.splitQuality(show.quality) # pick the best season NZB BestSeasonResult = None if SEASON_RESULT in foundResults: BestSeasonResult = pickBestResult(foundResults[SEASON_RESULT], show, anyQualities + bestQualities) highest_wanted_quality_overall = 0 for cur_season in foundResults: for cur_result in foundResults[cur_season]: if cur_result.quality != Quality.UNKNOWN and cur_result.quality in anyQualities + bestQualities and cur_result.quality > highest_wanted_quality_overall: highest_wanted_quality_overall = cur_result.quality logger.log(u"The highest wanted quality of any match is " + Quality.qualityStrings[highest_wanted_quality_overall], logger.DEBUG) # check if complete season pack can be used if BestSeasonResult: # get the quality of the season nzb seasonQual = BestSeasonResult.quality logger.log(u"The quality of the season result is " + Quality.qualityStrings[seasonQual], logger.DEBUG) # get all episodes in season from db myDB = db.DBConnection() sql_result = myDB.select("SELECT episode, airdate FROM tv_episodes WHERE showid = ? AND season = ? 
ORDER BY episode DESC;", [show.tvdbid, season]) if sql_result: last_airdate = datetime.date.fromordinal(sql_result[0]['airdate']) all_episodes = sorted([int(x['episode']) for x in sql_result]) else: last_airdate = datetime.date.fromordinal(1) all_episodes = [] logger.log(u"Episode list: " + str(all_episodes), logger.DEBUG) today = datetime.date.today() # only use complete season packs if season ended # only use complete season as fallback if season ended > 7 days season_ended = False use_season_fallback = False if last_airdate == datetime.date.fromordinal(1) or last_airdate > today: logger.log(u"Ignoring " + BestSeasonResult.name + ", airdate of last episode in season: " + str(last_airdate) + " is never or > today", logger.DEBUG) elif last_airdate + datetime.timedelta(days=7) <= today: season_ended = True use_season_fallback = True elif last_airdate < today: season_ended = True logger.log(u"Ignoring " + BestSeasonResult.name + " as fallback, airdate of last episode in season: " + str(last_airdate) + " is < 7 days", logger.DEBUG) # check if all or some episodes of the season are wanted want_all_eps = True want_some_eps = False for cur_ep_num in all_episodes: if not show.wantEpisode(season, cur_ep_num, seasonQual): want_all_eps = False else: want_some_eps = True # if every episode is needed in the season and there's nothing better then just download this and be done with it if season_ended and want_all_eps and BestSeasonResult.quality == highest_wanted_quality_overall: logger.log(u"Every episode in this season is needed, downloading the whole season " + BestSeasonResult.name) epObjs = [] for cur_ep_num in all_episodes: epObjs.append(show.getEpisode(season, cur_ep_num)) BestSeasonResult.episodes = epObjs return [BestSeasonResult] elif not want_some_eps: logger.log(u"No episodes from this season are wanted at this quality, ignoring the result of " + BestSeasonResult.name, logger.DEBUG) else: # if not all episodes are wanted try splitting up the complete season pack if 
BestSeasonResult.provider.providerType == GenericProvider.NZB: logger.log(u"Try breaking apart the NZB and adding the individual ones to our results", logger.DEBUG) # break it apart and add them as the lowest priority results individualResults = nzbSplitter.splitResult(BestSeasonResult) individualResults = filter(lambda x: show_name_helpers.filterBadReleases(x.name) and show_name_helpers.isGoodResult(x.name, show), individualResults) for curResult in individualResults: if len(curResult.episodes) == 1: epNum = curResult.episodes[0].episode elif len(curResult.episodes) > 1: epNum = MULTI_EP_RESULT if epNum in foundResults: foundResults[epNum].append(curResult) else: foundResults[epNum] = [curResult] else: # if not all episodes are wanted, splitting up the complete season pack for torrents is not possible # all we can do is leech the entire torrent, user will have to select which episodes not do download in his torrent client if use_season_fallback: # Creating a multi-ep result from a torrent Season result logger.log(u"Adding multi-ep result for full-season torrent. 
Set the episodes you don't want to 'don't download' in your torrent client if desired!") epObjs = [] for cur_ep_num in all_episodes: # only add wanted episodes for comparing/filter later with single results if show.wantEpisode(season, cur_ep_num, BestSeasonResult.quality): epObjs.append(show.getEpisode(season, cur_ep_num)) BestSeasonResult.episodes = epObjs if MULTI_EP_RESULT in foundResults: foundResults[MULTI_EP_RESULT].append(BestSeasonResult) else: foundResults[MULTI_EP_RESULT] = [BestSeasonResult] # go through multi-ep results and see if we really want them or not, get rid of the rest multiResults = {} if MULTI_EP_RESULT in foundResults: for multiResult in foundResults[MULTI_EP_RESULT]: logger.log(u"Check multi-episode result against single episode results" + multiResult.name, logger.DEBUG) # see how many of the eps that this result covers aren't covered by single results in_single_results = [] not_in_single_results = [] for epObj in multiResult.episodes: epNum = epObj.episode # if we have results for the episode if epNum in foundResults and len(foundResults[epNum]) > 0: in_single_results.append(epNum) else: not_in_single_results.append(epNum) logger.log(u"Multi-episode check result, episodes not in single results: " + str(not_in_single_results) + ", episodes in single results: " + str(in_single_results), logger.DEBUG) if not not_in_single_results: logger.log(u"All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG) continue # check if these eps are already covered by another multi-result multiNeededEps = [] multiNotNeededEps = [] for epObj in multiResult.episodes: epNum = epObj.episode if epNum in multiResults: multiNotNeededEps.append(epNum) else: multiNeededEps.append(epNum) logger.log(u"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(multiNotNeededEps), logger.DEBUG) if not multiNeededEps: logger.log(u"All of these episodes were covered by another 
multi-episode nzbs, ignoring this multi-ep result", logger.DEBUG) continue # if we're keeping this multi-result then remember it for epObj in multiResult.episodes: multiResults[epObj.episode] = multiResult # don't bother with the single result if we're going to get it with a multi result for epObj in multiResult.episodes: epNum = epObj.episode if epNum in foundResults: logger.log(u"A needed multi-episode result overlaps with a single-episode result for ep #" + str(epNum) + ", removing the single-episode results from the list", logger.DEBUG) del foundResults[epNum] finalResults += set(multiResults.values()) # of all the single ep results narrow it down to the best one for each episode for curEp in foundResults: if curEp in (MULTI_EP_RESULT, SEASON_RESULT): continue if len(foundResults[curEp]) == 0: continue finalResults.append(pickBestResult(foundResults[curEp], show)) return finalResults
23,079
Python
.py
415
43.848193
195
0.646688
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,313
name_cache.py
midgetspy_Sick-Beard/sickbeard/name_cache.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from sickbeard import db from sickbeard.helpers import sanitizeSceneName def addNameToCache(name, tvdb_id): """ Adds the show & tvdb id to the scene_names table in cache.db. name: The show name to cache tvdb_id: The tvdb id that this show should be cached with (can be None/0 for unknown) """ # standardize the name we're using to account for small differences in providers name = sanitizeSceneName(name) if not tvdb_id: tvdb_id = 0 cacheDB = db.DBConnection('cache.db') cacheDB.action("INSERT INTO scene_names (tvdb_id, name) VALUES (?, ?)", [tvdb_id, name]) def retrieveNameFromCache(name): """ Looks up the given name in the scene_names table in cache.db. name: The show name to look up. Returns: the tvdb id that resulted from the cache lookup or None if the show wasn't found in the cache """ # standardize the name we're using to account for small differences in providers name = sanitizeSceneName(name) cacheDB = db.DBConnection('cache.db') cache_results = cacheDB.select("SELECT * FROM scene_names WHERE name = ?", [name]) if not cache_results: return None return int(cache_results[0]["tvdb_id"]) def clearCache(): """ Deletes all "unknown" entries from the cache (names with tvdb_id of 0). 
""" cacheDB = db.DBConnection('cache.db') cacheDB.action("DELETE FROM scene_names WHERE tvdb_id = ?", [0])
2,259
Python
.py
50
40.1
107
0.705287
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,314
nzbget.py
midgetspy_Sick-Beard/sickbeard/nzbget.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import httplib import datetime import urllib import urlparse import sickbeard from base64 import standard_b64encode import xmlrpclib from sickbeard.exceptions import ex from sickbeard.providers.generic import GenericProvider from sickbeard import config from sickbeard import logger from common import Quality def sendNZB(nzb): if not sickbeard.NZBGET_HOST: logger.log(u"No NZBGet host found in configuration. 
Please configure it.", logger.ERROR) return False nzb_filename = nzb.name + ".nzb" try: url = config.clean_url(sickbeard.NZBGET_HOST) scheme, netloc, path, query, fragment = urlparse.urlsplit(url) # @UnusedVariable if sickbeard.NZBGET_USERNAME or sickbeard.NZBGET_PASSWORD: netloc = urllib.quote_plus(sickbeard.NZBGET_USERNAME.encode("utf-8", 'ignore')) + u":" + urllib.quote_plus(sickbeard.NZBGET_PASSWORD.encode("utf-8", 'ignore')) + u"@" + netloc url = urlparse.urlunsplit((scheme, netloc, u"/xmlrpc", "", "")) logger.log(u"Sending NZB to NZBGet: %s" % nzb.name) logger.log(u"NZBGet URL: " + url, logger.DEBUG) nzbGetRPC = xmlrpclib.ServerProxy(url.encode("utf-8", 'ignore')) if nzbGetRPC.writelog("INFO", "SickBeard connected to drop off " + nzb_filename + " any moment now."): logger.log(u"Successful connected to NZBGet", logger.DEBUG) else: logger.log(u"Successful connected to NZBGet, but unable to send a message", logger.ERROR) except httplib.socket.error: logger.log(u"Please check if NZBGet is running. NZBGet is not responding.", logger.ERROR) return False except xmlrpclib.ProtocolError, e: if (e.errmsg == "Unauthorized"): logger.log(u"NZBGet username or password is incorrect.", logger.ERROR) else: logger.log(u"NZBGet protocol error: " + e.errmsg, logger.ERROR) return False except Exception, e: logger.log(u"NZBGet sendNZB failed. URL: " + url + " Error: " + ex(e), logger.ERROR) return False # if it aired recently make it high priority and generate dupekey/dupescore add_to_top = False nzbgetprio = dupescore = 0 dupekey = "" for curEp in nzb.episodes: if dupekey == "": dupekey = "SickBeard-" + str(curEp.show.tvdbid) dupekey += "-" + str(curEp.season) + "." 
+ str(curEp.episode) if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7): add_to_top = True nzbgetprio = 100 # tweak dupescore based off quality, higher score wins if nzb.quality != Quality.UNKNOWN: dupescore = nzb.quality * 100 if nzb.quality == Quality.SNATCHED_PROPER: dupescore += 10 nzbget_result = None nzbcontent64 = None # if we get a raw data result we encode contents and pass that if nzb.resultType == "nzbdata": data = nzb.extraInfo[0] nzbcontent64 = standard_b64encode(data) logger.log(u"Attempting to send NZB to NZBGet (" + sickbeard.NZBGET_CATEGORY + ")", logger.DEBUG) try: # find out nzbget version to branch logic, 0.8.x and older will return 0 nzbget_version_str = nzbGetRPC.version() nzbget_version = config.to_int(nzbget_version_str[:nzbget_version_str.find(".")]) # v8 and older, no priority or dupe info if nzbget_version == 0: if nzbcontent64: nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, add_to_top, nzbcontent64) else: # appendurl not supported on older versions, so d/l nzb data from url ourselves if nzb.resultType == "nzb": genProvider = GenericProvider("") data = genProvider.getURL(nzb.url) if data: nzbcontent64 = standard_b64encode(data) nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, add_to_top, nzbcontent64) # v13+ has a new combined append method that accepts both (url and content) elif nzbget_version >= 13: if nzbcontent64: nzbget_result = nzbGetRPC.append(nzb_filename, nzbcontent64, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, False, dupekey, dupescore, "score") else: nzbget_result = nzbGetRPC.append(nzb_filename, nzb.url, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, False, dupekey, dupescore, "score") # the return value has changed from boolean to integer (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.) 
if nzbget_result > 0: nzbget_result = True else: nzbget_result = False # v12 pass dupekey + dupescore elif nzbget_version == 12: if nzbcontent64: nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzbcontent64, False, dupekey, dupescore, "score") else: nzbget_result = nzbGetRPC.appendurl(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzb.url, False, dupekey, dupescore, "score") # v9+ pass priority, no dupe info else: if nzbcontent64: nzbget_result = nzbGetRPC.append(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzbcontent64) else: nzbget_result = nzbGetRPC.appendurl(nzb_filename, sickbeard.NZBGET_CATEGORY, nzbgetprio, False, nzb.url) if nzbget_result: logger.log(u"NZB sent to NZBGet successfully", logger.DEBUG) return True else: logger.log(u"NZBGet could not add " + nzb_filename + " to the queue", logger.ERROR) return False except: logger.log(u"Connect Error to NZBGet: could not add " + nzb_filename + " to the queue", logger.ERROR) return False return False
6,941
Python
.py
131
43
188
0.651515
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,315
tv.py
midgetspy_Sick-Beard/sickbeard/tv.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os.path import datetime import threading import re import glob import sickbeard import xml.etree.cElementTree as etree from name_parser.parser import NameParser, InvalidNameException from lib.tvdb_api import tvdb_api, tvdb_exceptions from sickbeard import db from sickbeard import helpers, exceptions, logger from sickbeard.exceptions import ex from sickbeard import tvrage from sickbeard import image_cache from sickbeard import encodingKludge as ek from common import Quality, Overview, statusStrings from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, ARCHIVED, IGNORED, UNAIRED, WANTED, SKIPPED, UNKNOWN from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, NAMING_LIMITED_EXTEND_E_PREFIXED class TVShow(object): def __init__(self, tvdbid, lang=""): self.tvdbid = tvdbid self._location = "" self.name = "" self.tvrid = 0 self.tvrname = "" self.network = "" self.genre = "" self.runtime = 0 self.quality = int(sickbeard.QUALITY_DEFAULT) self.flatten_folders = int(sickbeard.FLATTEN_FOLDERS_DEFAULT) self.status = "" self.airs = "" self.startyear = 0 self.paused = 0 self.skip_notices = 0 self.air_by_date = 0 self.lang = lang self.last_update_tvdb = 1 
self.rls_ignore_words = "" self.rls_require_words = "" self.lock = threading.Lock() self._isDirGood = False self.episodes = {} otherShow = helpers.findCertainShow(sickbeard.showList, self.tvdbid) if otherShow is not None: raise exceptions.MultipleShowObjectsException("Can't create a show if it already exists") self.loadFromDB() def _getLocation(self): # no dir check needed if missing show dirs are created during post-processing if sickbeard.CREATE_MISSING_SHOW_DIRS: return self._location if ek.ek(os.path.isdir, self._location): return self._location else: raise exceptions.ShowDirNotFoundException("Show folder doesn't exist, you shouldn't be using it") if self._isDirGood: return self._location else: raise exceptions.NoNFOException("Show folder doesn't exist, you shouldn't be using it") def _setLocation(self, newLocation): logger.log(u"Setter sets location to " + newLocation, logger.DEBUG) # Don't validate dir if user wants to add shows without creating a dir if sickbeard.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, newLocation): self._location = newLocation self._isDirGood = True else: raise exceptions.NoNFOException("Invalid folder for the show!") location = property(_getLocation, _setLocation) # delete references to anything that's not in the internal lists def flushEpisodes(self): for curSeason in self.episodes: for curEp in self.episodes[curSeason]: myEp = self.episodes[curSeason][curEp] self.episodes[curSeason][curEp] = None del myEp def getAllEpisodes(self, season=None, has_location=False): myDB = db.DBConnection() sql_selection = "SELECT season, episode, " # subselection to detect multi-episodes early, share_location > 0 sql_selection = sql_selection + " (SELECT COUNT (*) FROM tv_episodes WHERE showid = tve.showid AND season = tve.season AND location != '' AND location = tve.location AND episode != tve.episode) AS share_location " sql_selection = sql_selection + " FROM tv_episodes tve WHERE showid = " + str(self.tvdbid) if season is not None: sql_selection = 
sql_selection + " AND season = " + str(season) if has_location: sql_selection = sql_selection + " AND location != '' " # need ORDER episode ASC to rename multi-episodes in order S01E01-02 sql_selection = sql_selection + " ORDER BY season ASC, episode ASC" results = myDB.select(sql_selection) ep_list = [] for cur_result in results: cur_ep = self.getEpisode(int(cur_result["season"]), int(cur_result["episode"])) if cur_ep: cur_ep.relatedEps = [] if cur_ep.location: # if there is a location, check if it's a multi-episode (share_location > 0) and put them in relatedEps if cur_result["share_location"] > 0: related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND location = ? AND episode != ? ORDER BY episode ASC", [self.tvdbid, cur_ep.season, cur_ep.location, cur_ep.episode]) for cur_related_ep in related_eps_result: related_ep = self.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"])) if related_ep not in cur_ep.relatedEps: cur_ep.relatedEps.append(related_ep) ep_list.append(cur_ep) return ep_list def getEpisode(self, season, episode, file=None, noCreate=False): if not season in self.episodes: self.episodes[season] = {} ep = None if not episode in self.episodes[season] or self.episodes[season][episode] is None: if noCreate: return None logger.log(str(self.tvdbid) + u": An object for episode " + str(season) + "x" + str(episode) + " didn't exist in the cache, trying to create it", logger.DEBUG) if file is not None: ep = TVEpisode(self, season, episode, file) else: ep = TVEpisode(self, season, episode) if ep is not None: self.episodes[season][episode] = ep return self.episodes[season][episode] def should_update(self, update_date=datetime.date.today()): # if show is not 'Ended' always update (status 'Continuing' or '') if self.status != 'Ended': return True # run logic against the current show latest aired and next unaired data to see if we should bypass 'Ended' status cur_tvdbid = self.tvdbid graceperiod = 
datetime.timedelta(days=30) myDB = db.DBConnection() last_airdate = datetime.date.fromordinal(1) # get latest aired episode to compare against today - graceperiod and today + graceperiod sql_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status > '1' ORDER BY airdate DESC LIMIT 1", [cur_tvdbid]) if sql_result: last_airdate = datetime.date.fromordinal(sql_result[0]['airdate']) if last_airdate >= (update_date - graceperiod) and last_airdate <= (update_date + graceperiod): return True # get next upcoming UNAIRED episode to compare against today + graceperiod sql_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status = '1' ORDER BY airdate ASC LIMIT 1", [cur_tvdbid]) if sql_result: next_airdate = datetime.date.fromordinal(sql_result[0]['airdate']) if next_airdate <= (update_date + graceperiod): return True last_update_tvdb = datetime.date.fromordinal(self.last_update_tvdb) # in the first year after ended (last airdate), update every 30 days if (update_date - last_airdate) < datetime.timedelta(days=450) and (update_date - last_update_tvdb) > datetime.timedelta(days=30): return True return False def writeShowNFO(self): result = False if not ek.ek(os.path.isdir, self._location): logger.log(str(self.tvdbid) + u": Show dir doesn't exist, skipping NFO generation") return False logger.log(str(self.tvdbid) + u": Writing NFOs for show") for cur_provider in sickbeard.metadata_provider_dict.values(): result = cur_provider.create_show_metadata(self) or result return result def writeMetadata(self, show_only=False): if not ek.ek(os.path.isdir, self._location): logger.log(str(self.tvdbid) + u": Show dir doesn't exist, skipping NFO generation") return self.getImages() self.writeShowNFO() if not show_only: self.writeEpisodeNFOs() def writeEpisodeNFOs(self): if not ek.ek(os.path.isdir, self._location): logger.log(str(self.tvdbid) + u": Show dir doesn't exist, skipping NFO 
generation") return logger.log(str(self.tvdbid) + u": Writing NFOs for all episodes") myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.tvdbid]) for epResult in sqlResults: logger.log(str(self.tvdbid) + u": Retrieving/creating episode " + str(epResult["season"]) + u"x" + str(epResult["episode"]), logger.DEBUG) curEp = self.getEpisode(epResult["season"], epResult["episode"]) curEp.createMetaFiles() # find all media files in the show folder and create episodes for as many as possible def loadEpisodesFromDir(self): if not ek.ek(os.path.isdir, self._location): logger.log(str(self.tvdbid) + u": Show dir doesn't exist, not loading episodes from disk") return logger.log(str(self.tvdbid) + u": Loading all episodes from the show directory " + self._location) # get file list mediaFiles = helpers.listMediaFiles(self._location) # create TVEpisodes from each media file (if possible) for mediaFile in mediaFiles: curEpisode = None logger.log(str(self.tvdbid) + u": Creating episode from " + mediaFile, logger.DEBUG) try: curEpisode = self.makeEpFromFile(ek.ek(os.path.join, self._location, mediaFile)) except (exceptions.ShowNotFoundException, exceptions.EpisodeNotFoundException), e: logger.log(u"Episode " + mediaFile + " returned an exception: " + ex(e), logger.ERROR) continue except exceptions.EpisodeDeletedException: logger.log(u"The episode deleted itself when I tried making an object for it", logger.DEBUG) if curEpisode is None: continue if not curEpisode.release_name: ep_file_name = ek.ek(os.path.basename, curEpisode.location) ep_base_name = helpers.remove_non_release_groups(helpers.remove_extension(ep_file_name)) parse_result = None try: np = NameParser(False) parse_result = np.parse(ep_base_name) except InvalidNameException: pass if not ' ' in ep_base_name and parse_result and parse_result.release_group: logger.log(u"Name " + ep_base_name + u" gave release group of " + parse_result.release_group + ", seems 
valid", logger.DEBUG) curEpisode.release_name = ep_base_name # store the reference in the show if curEpisode is not None: curEpisode.saveToDB() def loadEpisodesFromDB(self): logger.log(u"Loading all episodes from the DB") myDB = db.DBConnection() sql = "SELECT * FROM tv_episodes WHERE showid = ?" sqlResults = myDB.select(sql, [self.tvdbid]) scannedEps = {} ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if self.lang: ltvdb_api_parms['language'] = self.lang t = tvdb_api.Tvdb(**ltvdb_api_parms) cachedShow = t[self.tvdbid] cachedSeasons = {} for curResult in sqlResults: deleteEp = False curSeason = int(curResult["season"]) curEpisode = int(curResult["episode"]) if curSeason not in cachedSeasons: try: cachedSeasons[curSeason] = cachedShow[curSeason] except tvdb_exceptions.tvdb_seasonnotfound, e: logger.log(u"Error when trying to load the episode from TVDB: " + e.message, logger.WARNING) deleteEp = True if not curSeason in scannedEps: scannedEps[curSeason] = {} logger.log(u"Loading episode " + str(curSeason) + "x" + str(curEpisode) + " from the DB", logger.DEBUG) try: curEp = self.getEpisode(curSeason, curEpisode) # if we found out that the ep is no longer on TVDB then delete it from our database too if deleteEp: curEp.deleteEpisode() curEp.loadFromDB(curSeason, curEpisode) curEp.loadFromTVDB(tvapi=t, cachedSeason=cachedSeasons[curSeason]) scannedEps[curSeason][curEpisode] = True except exceptions.EpisodeDeletedException: logger.log(u"Tried loading an episode from the DB that should have been deleted, skipping it", logger.DEBUG) continue return scannedEps def loadEpisodesFromTVDB(self, cache=True): # There's gotta be a better way of doing this but we don't wanna # change the cache value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if not cache: ltvdb_api_parms['cache'] = False if self.lang: ltvdb_api_parms['language'] = self.lang try: t = tvdb_api.Tvdb(**ltvdb_api_parms) showObj = t[self.tvdbid] except tvdb_exceptions.tvdb_error: logger.log(u"TVDB timed 
out, unable to update episodes from TVDB", logger.ERROR) return None logger.log(str(self.tvdbid) + u": Loading all episodes from theTVDB...") scannedEps = {} for season in showObj: scannedEps[season] = {} for episode in showObj[season]: # need some examples of wtf episode 0 means to decide if we want it or not if episode == 0: continue try: ep = self.getEpisode(season, episode) except exceptions.EpisodeNotFoundException: logger.log(str(self.tvdbid) + u": TVDB object for " + str(season) + "x" + str(episode) + " is incomplete, skipping this episode") continue else: try: ep.loadFromTVDB(tvapi=t) except exceptions.EpisodeDeletedException: logger.log(u"The episode was deleted, skipping the rest of the load") continue with ep.lock: logger.log(str(self.tvdbid) + u": Loading info from theTVDB for episode " + str(season) + "x" + str(episode), logger.DEBUG) ep.loadFromTVDB(season, episode, tvapi=t) if ep.dirty: ep.saveToDB() scannedEps[season][episode] = True # Done updating save last update date self.last_update_tvdb = datetime.date.today().toordinal() self.saveToDB() return scannedEps def setTVRID(self, force=False): if self.tvrid != 0 and not force: logger.log(u"No need to get the TVRage ID, it's already populated", logger.DEBUG) return logger.log(u"Attempting to retrieve the TVRage ID", logger.DEBUG) try: # load the tvrage object, it will set the ID in its constructor if possible tvrage.TVRage(self) self.saveToDB() except exceptions.TVRageException, e: logger.log(u"Couldn't get TVRage ID because we're unable to sync TVDB and TVRage: " + ex(e), logger.DEBUG) return def getImages(self, fanart=None, poster=None): fanart_result = poster_result = banner_result = False season_posters_result = season_banners_result = season_all_poster_result = season_all_banner_result = False for cur_provider in sickbeard.metadata_provider_dict.values(): # FIXME: Needs to not show this message if the option is not enabled? 
logger.log(u"Running metadata routines for " + cur_provider.name, logger.DEBUG) fanart_result = cur_provider.create_fanart(self) or fanart_result poster_result = cur_provider.create_poster(self) or poster_result banner_result = cur_provider.create_banner(self) or banner_result season_posters_result = cur_provider.create_season_posters(self) or season_posters_result season_banners_result = cur_provider.create_season_banners(self) or season_banners_result season_all_poster_result = cur_provider.create_season_all_poster(self) or season_all_poster_result season_all_banner_result = cur_provider.create_season_all_banner(self) or season_all_banner_result return fanart_result or poster_result or banner_result or season_posters_result or season_banners_result or season_all_poster_result or season_all_banner_result def loadLatestFromTVRage(self): try: # load the tvrage object tvr = tvrage.TVRage(self) newEp = tvr.findLatestEp() if newEp is not None: logger.log(u"TVRage gave us an episode object - saving it for now", logger.DEBUG) newEp.saveToDB() # make an episode out of it except exceptions.TVRageException, e: logger.log(u"Unable to add TVRage info: " + ex(e), logger.WARNING) # make a TVEpisode object from a media file def makeEpFromFile(self, file): if not ek.ek(os.path.isfile, file): logger.log(str(self.tvdbid) + u": That isn't even a real file dude... 
    # make a TVEpisode object from a media file
    def makeEpFromFile(self, file):
        """Parse a media file's name into episode(s) of this show, attach the
        file to the matching TVEpisode object(s) and fix up their status.

        Returns the root TVEpisode (with multi-episode files linked via
        relatedEps), or None when the file can't be parsed/matched.
        """

        if not ek.ek(os.path.isfile, file):
            logger.log(str(self.tvdbid) + u": That isn't even a real file dude... " + file)
            return None

        logger.log(str(self.tvdbid) + u": Creating episode object from " + file, logger.DEBUG)

        try:
            myParser = NameParser()
            parse_result = myParser.parse(file)
        except InvalidNameException:
            logger.log(u"Unable to parse the filename " + file + " into a valid episode", logger.ERROR)
            return None

        if len(parse_result.episode_numbers) == 0 and not parse_result.air_by_date:
            logger.log(u"parse_result: " + str(parse_result))
            logger.log(u"No episode number found in " + file + ", ignoring it", logger.ERROR)
            return None

        # for now lets assume that any episode in the show dir belongs to that show
        season = parse_result.season_number if parse_result.season_number is not None else 1
        episodes = parse_result.episode_numbers
        rootEp = None

        # if we have an air-by-date show then get the real season/episode numbers
        if parse_result.air_by_date:
            try:
                # There's gotta be a better way of doing this but we don't wanna
                # change the cache value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if self.lang:
                    ltvdb_api_parms['language'] = self.lang

                t = tvdb_api.Tvdb(**ltvdb_api_parms)

                epObj = t[self.tvdbid].airedOn(parse_result.air_date)[0]
                season = int(epObj["seasonnumber"])
                episodes = [int(epObj["episodenumber"])]
            except tvdb_exceptions.tvdb_episodenotfound:
                logger.log(u"Unable to find episode with date " + str(parse_result.air_date) + " for show " + self.name + ", skipping", logger.WARNING)
                return None
            except tvdb_exceptions.tvdb_error, e:
                logger.log(u"Unable to contact TVDB: " + ex(e), logger.WARNING)
                return None

        for curEpNum in episodes:

            episode = int(curEpNum)

            logger.log(str(self.tvdbid) + u": " + file + " parsed to " + self.name + " " + str(season) + "x" + str(episode), logger.DEBUG)

            checkQualityAgain = False
            same_file = False
            curEp = self.getEpisode(season, episode)

            if curEp is None:
                try:
                    # create the episode with the file attached from the start
                    curEp = self.getEpisode(season, episode, file)
                except exceptions.EpisodeNotFoundException:
                    logger.log(str(self.tvdbid) + u": Unable to figure out what this file is, skipping", logger.ERROR)
                    continue

            else:
                # if there is a new file associated with this ep then re-check the quality
                if curEp.location and ek.ek(os.path.normpath, curEp.location) != ek.ek(os.path.normpath, file):
                    logger.log(u"The old episode had a different file associated with it, I will re-check the quality based on the new filename " + file, logger.DEBUG)
                    checkQualityAgain = True

                with curEp.lock:
                    old_size = curEp.file_size
                    # setting location also refreshes curEp.file_size
                    curEp.location = file
                    # if the sizes are the same then it's probably the same file
                    if old_size and curEp.file_size == old_size:
                        same_file = True
                    else:
                        same_file = False
                    curEp.checkForMetaFiles()

            # the first episode of a multi-episode file is the root; the rest
            # are linked to it via relatedEps
            if rootEp is None:
                rootEp = curEp
            else:
                if curEp not in rootEp.relatedEps:
                    rootEp.relatedEps.append(curEp)

            # if it's a new file then
            if not same_file:
                curEp.release_name = ''

            # if they replace a file on me I'll make some attempt at re-checking the quality unless I know it's the same file
            if checkQualityAgain and not same_file:
                newQuality = Quality.nameQuality(file)
                logger.log(u"Since this file has been renamed, I checked " + file + " and found quality " + Quality.qualityStrings[newQuality], logger.DEBUG)
                if newQuality != Quality.UNKNOWN:
                    curEp.status = Quality.compositeStatus(DOWNLOADED, newQuality)

            # check for status/quality changes as long as it's a new file
            elif not same_file and sickbeard.helpers.isMediaFile(file) and curEp.status not in Quality.DOWNLOADED + [ARCHIVED, IGNORED]:

                oldStatus, oldQuality = Quality.splitCompositeStatus(curEp.status)
                newQuality = Quality.nameQuality(file)
                if newQuality == Quality.UNKNOWN:
                    newQuality = Quality.assumeQuality(file)

                newStatus = None

                # if it was snatched and now exists then set the status correctly
                if oldStatus == SNATCHED and oldQuality <= newQuality:
                    logger.log(u"STATUS: this ep used to be snatched with quality " + Quality.qualityStrings[oldQuality] + " but a file exists with quality " + Quality.qualityStrings[newQuality] + " so I'm setting the status to DOWNLOADED", logger.DEBUG)
                    newStatus = DOWNLOADED

                # if it was snatched proper and we found a higher quality one then allow the status change
                elif oldStatus == SNATCHED_PROPER and oldQuality < newQuality:
                    logger.log(u"STATUS: this ep used to be snatched proper with quality " + Quality.qualityStrings[oldQuality] + " but a file exists with quality " + Quality.qualityStrings[newQuality] + " so I'm setting the status to DOWNLOADED", logger.DEBUG)
                    newStatus = DOWNLOADED

                elif oldStatus not in (SNATCHED, SNATCHED_PROPER):
                    newStatus = DOWNLOADED

                if newStatus is not None:
                    with curEp.lock:
                        logger.log(u"STATUS: we have an associated file, so setting the status from " + str(curEp.status) + " to DOWNLOADED/" + str(Quality.statusFromName(file)), logger.DEBUG)
                        curEp.status = Quality.compositeStatus(newStatus, newQuality)

            with curEp.lock:
                curEp.saveToDB()

        return rootEp
    def loadFromDB(self, skipNFO=False):
        """Load this show's fields from its tv_shows row.

        Values already set on the object (name, network, genre, lang, tvrid)
        are preserved; only empty/zero fields are filled in from the DB.
        Raises MultipleDBShowsException on duplicate rows.
        skipNFO is accepted but unused here - presumably kept for API
        compatibility with callers; TODO confirm.
        """

        logger.log(str(self.tvdbid) + u": Loading show info from database")

        myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT * FROM tv_shows WHERE tvdb_id = ?", [self.tvdbid])

        if len(sqlResults) > 1:
            raise exceptions.MultipleDBShowsException()
        elif len(sqlResults) == 0:
            logger.log(str(self.tvdbid) + u": Unable to find the show in the database")
            return
        else:
            # only fill in fields that haven't been populated already
            if self.name == "":
                self.name = sqlResults[0]["show_name"]
            self.tvrname = sqlResults[0]["tvr_name"]
            if self.network == "":
                self.network = sqlResults[0]["network"]
            if self.genre == "":
                self.genre = sqlResults[0]["genre"]

            self.runtime = sqlResults[0]["runtime"]

            self.status = sqlResults[0]["status"]

            if self.status is None:
                self.status = ""

            self.airs = sqlResults[0]["airs"]

            if self.airs is None:
                self.airs = ""

            self.startyear = sqlResults[0]["startyear"]

            if self.startyear is None:
                self.startyear = 0

            self.air_by_date = sqlResults[0]["air_by_date"]

            if self.air_by_date is None:
                self.air_by_date = 0

            self.quality = int(sqlResults[0]["quality"])
            self.flatten_folders = int(sqlResults[0]["flatten_folders"])
            self.paused = int(sqlResults[0]["paused"])
            self.skip_notices = int(sqlResults[0]["skip_notices"])

            # bypass the location property setter on purpose (no disk checks)
            self._location = sqlResults[0]["location"]

            if self.tvrid == 0:
                self.tvrid = int(sqlResults[0]["tvr_id"])

            if self.lang == "":
                self.lang = sqlResults[0]["lang"]

            self.last_update_tvdb = sqlResults[0]["last_update_tvdb"]

            self.rls_ignore_words = sqlResults[0]["rls_ignore_words"]
            self.rls_require_words = sqlResults[0]["rls_require_words"]
int(sqlResults[0]["skip_notices"]) self._location = sqlResults[0]["location"] if self.tvrid == 0: self.tvrid = int(sqlResults[0]["tvr_id"]) if self.lang == "": self.lang = sqlResults[0]["lang"] self.last_update_tvdb = sqlResults[0]["last_update_tvdb"] self.rls_ignore_words = sqlResults[0]["rls_ignore_words"] self.rls_require_words = sqlResults[0]["rls_require_words"] def loadFromTVDB(self, cache=True, tvapi=None, cachedSeason=None): logger.log(str(self.tvdbid) + u": Loading show info from theTVDB") # There's gotta be a better way of doing this but we don't wanna # change the cache value elsewhere if tvapi is None: ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if not cache: ltvdb_api_parms['cache'] = False if self.lang: ltvdb_api_parms['language'] = self.lang t = tvdb_api.Tvdb(**ltvdb_api_parms) else: t = tvapi myEp = t[self.tvdbid] try: self.name = myEp["seriesname"].strip() except AttributeError: raise tvdb_exceptions.tvdb_attributenotfound("Found %s, but attribute 'seriesname' was empty." % (self.tvdbid)) self.genre = myEp['genre'] self.network = myEp['network'] if myEp["airs_dayofweek"] is not None and myEp["airs_time"] is not None: self.airs = myEp["airs_dayofweek"] + " " + myEp["airs_time"] if myEp["firstaired"] is not None and myEp["firstaired"]: self.startyear = int(myEp["firstaired"].split('-')[0]) if self.airs is None: self.airs = "" if myEp["status"] is not None: self.status = myEp["status"] if self.status is None: self.status = "" self.saveToDB() def nextEpisode(self): logger.log(str(self.tvdbid) + u": Finding the episode which airs next", logger.DEBUG) myDB = db.DBConnection() innerQuery = "SELECT airdate FROM tv_episodes WHERE showid = ? AND airdate >= ? AND status = ? ORDER BY airdate ASC LIMIT 1" innerParams = [self.tvdbid, datetime.date.today().toordinal(), UNAIRED] query = "SELECT * FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= (" + innerQuery + ") and status = ?" 
params = [self.tvdbid, datetime.date.today().toordinal()] + innerParams + [UNAIRED] sqlResults = myDB.select(query, params) if sqlResults is None or len(sqlResults) == 0: logger.log(str(self.tvdbid) + u": No episode found... need to implement tvrage and also show status", logger.DEBUG) return [] else: logger.log(str(self.tvdbid) + u": Found episode " + str(sqlResults[0]["season"]) + "x" + str(sqlResults[0]["episode"]), logger.DEBUG) foundEps = [] for sqlEp in sqlResults: curEp = self.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"])) foundEps.append(curEp) return foundEps # if we didn't get an episode then try getting one from tvrage # load tvrage info # extract NextEpisode info # verify that we don't have it in the DB somehow (ep mismatch) def deleteShow(self): myDB = db.DBConnection() myDB.action("DELETE FROM tv_episodes WHERE showid = ?", [self.tvdbid]) myDB.action("DELETE FROM tv_shows WHERE tvdb_id = ?", [self.tvdbid]) # remove self from show list sickbeard.showList = [x for x in sickbeard.showList if x.tvdbid != self.tvdbid] # clear the cache image_cache_dir = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images') for cache_file in ek.ek(glob.glob, ek.ek(os.path.join, image_cache_dir, str(self.tvdbid) + '.*')): logger.log(u"Deleting cache file " + cache_file) os.remove(cache_file) def populateCache(self): cache_inst = image_cache.ImageCache() logger.log(u"Checking & filling cache for show " + self.name) cache_inst.fill_cache(self) def refreshDir(self): # make sure the show dir is where we think it is unless dirs are created on the fly if not ek.ek(os.path.isdir, self._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS: return False # load from dir self.loadEpisodesFromDir() # run through all locations from DB, check that they exist logger.log(str(self.tvdbid) + u": Loading all episodes with a location from the database") myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? 
AND location != ''", [self.tvdbid]) for ep in sqlResults: curLoc = os.path.normpath(ep["location"]) season = int(ep["season"]) episode = int(ep["episode"]) try: curEp = self.getEpisode(season, episode) except exceptions.EpisodeDeletedException: logger.log(u"The episode was deleted while we were refreshing it, moving on to the next one", logger.DEBUG) continue # if the path doesn't exist or if it's not in our show dir if not ek.ek(os.path.isfile, curLoc) or not os.path.normpath(curLoc).startswith(os.path.normpath(self.location)): with curEp.lock: # if it used to have a file associated with it and it doesn't anymore then set it to IGNORED if curEp.location and curEp.status in Quality.DOWNLOADED: logger.log(str(self.tvdbid) + u": Location for " + str(season) + "x" + str(episode) + " doesn't exist, removing it and changing our status to IGNORED", logger.DEBUG) curEp.status = IGNORED curEp.location = '' curEp.hasnfo = False curEp.hastbn = False curEp.release_name = '' curEp.saveToDB() def saveToDB(self): logger.log(str(self.tvdbid) + u": Saving show info to database", logger.DEBUG) myDB = db.DBConnection() controlValueDict = {"tvdb_id": self.tvdbid} newValueDict = {"show_name": self.name, "tvr_id": self.tvrid, "location": self._location, "network": self.network, "genre": self.genre, "runtime": self.runtime, "quality": self.quality, "airs": self.airs, "status": self.status, "flatten_folders": self.flatten_folders, "paused": self.paused, "air_by_date": self.air_by_date, "startyear": self.startyear, "tvr_name": self.tvrname, "lang": self.lang, "last_update_tvdb": self.last_update_tvdb, "rls_ignore_words": self.rls_ignore_words, "rls_require_words": self.rls_require_words, "skip_notices": self.skip_notices } myDB.upsert("tv_shows", newValueDict, controlValueDict) def __str__(self): toReturn = "" toReturn += "name: " + self.name + "\n" toReturn += "location: " + self._location + "\n" toReturn += "tvdbid: " + str(self.tvdbid) + "\n" if self.network is not None: toReturn += 
    def wantEpisode(self, season, episode, quality, manualSearch=False):
        """Decide whether a found download for season x episode at *quality*
        should be grabbed.

        Considers the show's configured qualities, the episode's current DB
        status, and (for re-downloads) whether the new quality beats the old.
        manualSearch relaxes the status checks for user-forced searches.
        Returns True to grab, False to ignore.
        """

        logger.log(u"Checking if found episode " + str(season) + "x" + str(episode) + " is wanted at quality " + Quality.qualityStrings[quality], logger.DEBUG)

        # if the quality isn't one we want under any circumstances then just say no
        anyQualities, bestQualities = Quality.splitQuality(self.quality)
        logger.log(u"any,best = " + str(anyQualities) + " " + str(bestQualities) + " and found " + str(quality), logger.DEBUG)

        if quality not in anyQualities + bestQualities:
            logger.log(u"Don't want this quality, ignoring found episode", logger.DEBUG)
            return False

        myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [self.tvdbid, season, episode])

        if not sqlResults or not len(sqlResults):
            logger.log(u"Unable to find a matching episode in database, ignoring found episode", logger.DEBUG)
            return False

        epStatus = int(sqlResults[0]["status"])
        epStatus_text = statusStrings[epStatus]

        logger.log(u"Existing episode status: " + str(epStatus) + " (" + epStatus_text + ")", logger.DEBUG)

        # if we know we don't want it then just say no
        if epStatus in (SKIPPED, IGNORED, ARCHIVED) and not manualSearch:
            logger.log(u"Existing episode status is skipped/ignored/archived, ignoring found episode", logger.DEBUG)
            return False

        # if it's one of these then we want it as long as it's in our allowed initial qualities
        if quality in anyQualities + bestQualities:
            if epStatus in (WANTED, UNAIRED, SKIPPED):
                logger.log(u"Existing episode status is wanted/unaired/skipped, getting found episode", logger.DEBUG)
                return True
            elif manualSearch:
                logger.log(u"Usually ignoring found episode, but forced search allows the quality, getting found episode", logger.DEBUG)
                return True
            else:
                # fall through to the re-download quality comparison below
                logger.log(u"Quality is on wanted list, need to check if it's better than existing quality", logger.DEBUG)

        curStatus, curQuality = Quality.splitCompositeStatus(epStatus)

        # if we are re-downloading then we only want it if it's in our bestQualities list and better than what we have
        if curStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER and quality in bestQualities and quality > curQuality:
            logger.log(u"Episode already exists but the found episode has better quality, getting found episode", logger.DEBUG)
            return True
        else:
            logger.log(u"Episode already exists and the found episode has same/lower quality, ignoring found episode", logger.DEBUG)

        logger.log(u"None of the conditions were met, ignoring found episode", logger.DEBUG)
        return False
    def getOverview(self, epStatus):
        """Map a (possibly composite) episode status to an Overview category
        for display: WANTED/UNAIRED/SKIPPED/GOOD/QUAL/SNATCHED."""

        if epStatus == WANTED:
            return Overview.WANTED
        elif epStatus in (UNAIRED, UNKNOWN):
            return Overview.UNAIRED
        elif epStatus in (SKIPPED, IGNORED):
            return Overview.SKIPPED
        elif epStatus == ARCHIVED:
            return Overview.GOOD
        elif epStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER:

            anyQualities, bestQualities = Quality.splitQuality(self.quality)  # @UnusedVariable
            # the highest "best" quality is the re-download target, if any
            if bestQualities:
                maxBestQuality = max(bestQualities)
            else:
                maxBestQuality = None

            epStatus, curQuality = Quality.splitCompositeStatus(epStatus)

            if epStatus in (SNATCHED, SNATCHED_PROPER):
                return Overview.SNATCHED
            # if they don't want re-downloads then we call it good if they have anything
            elif maxBestQuality is None:
                return Overview.GOOD
            # if they have one but it's not the best they want then mark it as qual
            elif curQuality < maxBestQuality:
                return Overview.QUAL
            # if it's >= maxBestQuality then it's good
            else:
                return Overview.GOOD
def dirty_setter(attr_name):
    """Build a property setter for *attr_name* that flags self.dirty on change.

    The returned function writes the new value only when it differs from the
    current one, marking the owner object as needing a DB save.
    """
    def wrapper(self, val):
        old_val = getattr(self, attr_name)
        if old_val != val:
            setattr(self, attr_name, val)
            self.dirty = True
    return wrapper
status = property(lambda self: self._status, dirty_setter("_status")) tvdbid = property(lambda self: self._tvdbid, dirty_setter("_tvdbid")) # location = property(lambda self: self._location, dirty_setter("_location")) file_size = property(lambda self: self._file_size, dirty_setter("_file_size")) release_name = property(lambda self: self._release_name, dirty_setter("_release_name")) def _set_location(self, new_location): logger.log(u"Setter sets location to " + new_location, logger.DEBUG) dirty_setter("_location")(self, new_location) if new_location and ek.ek(os.path.isfile, new_location): self.file_size = ek.ek(os.path.getsize, new_location) else: self.file_size = 0 location = property(lambda self: self._location, _set_location) def checkForMetaFiles(self): oldhasnfo = self.hasnfo oldhastbn = self.hastbn cur_nfo = False cur_tbn = False # check for nfo and tbn if ek.ek(os.path.isfile, self.location): for cur_provider in sickbeard.metadata_provider_dict.values(): if cur_provider.episode_metadata: new_result = cur_provider._has_episode_metadata(self) else: new_result = False cur_nfo = new_result or cur_nfo if cur_provider.episode_thumbnails: new_result = cur_provider._has_episode_thumb(self) else: new_result = False cur_tbn = new_result or cur_tbn self.hasnfo = cur_nfo self.hastbn = cur_tbn # if either setting has changed return true, if not return false return oldhasnfo != self.hasnfo or oldhastbn != self.hastbn def specifyEpisode(self, season, episode): sqlResult = self.loadFromDB(season, episode) if not sqlResult: # only load from NFO if we didn't load from DB if ek.ek(os.path.isfile, self.location): try: self.loadFromNFO(self.location) except exceptions.NoNFOException: logger.log(str(self.show.tvdbid) + u": There was an error loading the NFO for episode " + str(season) + "x" + str(episode), logger.ERROR) pass # if we tried loading it from NFO and didn't find the NFO, use TVDB if self.hasnfo is False: try: result = self.loadFromTVDB(season, episode) except 
exceptions.EpisodeDeletedException: result = False # if we failed SQL *and* NFO, TVDB then fail if result is False: raise exceptions.EpisodeNotFoundException("Couldn't find episode " + str(season) + "x" + str(episode)) # don't update if not needed if self.dirty: self.saveToDB() def loadFromDB(self, season, episode): logger.log(str(self.show.tvdbid) + u": Loading episode details from DB for episode " + str(season) + "x" + str(episode), logger.DEBUG) myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [self.show.tvdbid, season, episode]) if len(sqlResults) > 1: raise exceptions.MultipleDBEpisodesException("Your DB has two records for the same show somehow.") elif len(sqlResults) == 0: logger.log(str(self.show.tvdbid) + u": Episode " + str(self.season) + "x" + str(self.episode) + " not found in the database", logger.DEBUG) return False else: if sqlResults[0]["name"] is not None: self.name = sqlResults[0]["name"] self.season = season self.episode = episode self.description = sqlResults[0]["description"] if self.description is None: self.description = "" self.airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"])) # logger.log(u"1 Status changes from " + str(self.status) + " to " + str(sqlResults[0]["status"]), logger.DEBUG) self.status = int(sqlResults[0]["status"]) # don't overwrite my location if sqlResults[0]["location"] != "" and sqlResults[0]["location"] is not None: self.location = os.path.normpath(sqlResults[0]["location"]) if sqlResults[0]["file_size"]: self.file_size = int(sqlResults[0]["file_size"]) else: self.file_size = 0 self.tvdbid = int(sqlResults[0]["tvdbid"]) if sqlResults[0]["release_name"] is not None: self.release_name = sqlResults[0]["release_name"] self.dirty = False return True def loadFromTVDB(self, season=None, episode=None, cache=True, tvapi=None, cachedSeason=None): if season is None: season = self.season if episode is None: episode = self.episode 
    def loadFromTVDB(self, season=None, episode=None, cache=True, tvapi=None, cachedSeason=None):
        """Refresh this episode's details from theTVDB and fix up its status.

        season/episode default to the object's own numbers. tvapi and
        cachedSeason let callers share an API handle / pre-fetched season
        object to avoid repeat lookups. When TVDB no longer has (or has an
        incomplete record for) an episode we previously knew, the episode is
        deleted from the DB. Returns False on fatal lookup problems, None on
        soft exits, nothing otherwise.
        """

        if season is None:
            season = self.season
        if episode is None:
            episode = self.episode

        logger.log(str(self.show.tvdbid) + u": Loading episode details from theTVDB for episode " + str(season) + "x" + str(episode), logger.DEBUG)

        tvdb_lang = self.show.lang

        try:
            if cachedSeason is None:
                if tvapi is None:
                    # There's gotta be a better way of doing this but we don't wanna
                    # change the cache value elsewhere
                    ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                    if not cache:
                        ltvdb_api_parms['cache'] = False

                    if tvdb_lang:
                        ltvdb_api_parms['language'] = tvdb_lang

                    t = tvdb_api.Tvdb(**ltvdb_api_parms)
                else:
                    t = tvapi
                myEp = t[self.show.tvdbid][season][episode]
            else:
                myEp = cachedSeason[episode]

        except (tvdb_exceptions.tvdb_error, IOError), e:
            logger.log(u"TVDB threw up an error: " + ex(e), logger.DEBUG)
            # if the episode is already valid just log it, if not throw it up
            if self.name:
                logger.log(u"TVDB timed out but we have enough info from other sources, allowing the error", logger.DEBUG)
                return
            else:
                logger.log(u"TVDB timed out, unable to create the episode", logger.ERROR)
                return False
        except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
            logger.log(u"Unable to find the episode on tvdb... has it been removed? Should I delete from db?", logger.DEBUG)
            # if I'm no longer on TVDB but I once was then delete myself from the DB
            if self.tvdbid != -1:
                self.deleteEpisode()
            return

        # a missing/placeholder air date is normalized to the epoch sentinel
        if not myEp["firstaired"] or myEp["firstaired"] == "0000-00-00":
            myEp["firstaired"] = str(datetime.date.fromordinal(1))

        if myEp["episodename"] is None or myEp["episodename"] == "":
            logger.log(u"This episode (" + self.show.name + " - " + str(season) + "x" + str(episode) + ") has no name on TVDB")
            # if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
            if self.tvdbid != -1:
                self.deleteEpisode()
            return False

        self.name = myEp["episodename"]
        self.season = season
        self.episode = episode
        tmp_description = myEp["overview"]
        if tmp_description is None:
            self.description = ""
        else:
            self.description = tmp_description
        rawAirdate = [int(x) for x in myEp["firstaired"].split("-")]
        try:
            self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
        except ValueError:
            logger.log(u"Malformed air date retrieved from TVDB (" + self.show.name + " - " + str(season) + "x" + str(episode) + ")", logger.ERROR)
            # if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
            if self.tvdbid != -1:
                self.deleteEpisode()
            return False

        # early conversion to int so that episode doesn't get marked dirty
        self.tvdbid = int(myEp["id"])

        # don't update show status if show dir is missing, unless it's missing on purpose
        if not ek.ek(os.path.isdir, self.show._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS and not sickbeard.ADD_SHOWS_WO_DIR:
            logger.log(u"The show dir is missing, not bothering to change the episode statuses since it'd probably be invalid")
            return

        logger.log(str(self.show.tvdbid) + u": Setting status for " + str(season) + "x" + str(episode) + " based on status " + str(self.status) + " and existence of " + self.location, logger.DEBUG)

        # if we don't have the file
        if not ek.ek(os.path.isfile, self.location):

            # if it hasn't aired yet set the status to UNAIRED
            if self.airdate >= datetime.date.today() and self.status in [SKIPPED, UNAIRED, UNKNOWN, WANTED]:
                logger.log(u"Episode airs in the future, marking it " + str(UNAIRED), logger.DEBUG)
                self.status = UNAIRED

            # if there's no airdate then set it to skipped (and respect ignored)
            elif self.airdate == datetime.date.fromordinal(1):
                if self.status == IGNORED:
                    logger.log(u"Episode has no air date, but it's already marked as ignored", logger.DEBUG)
                else:
                    logger.log(u"Episode has no air date, automatically marking it skipped", logger.DEBUG)
                    self.status = SKIPPED

            # if we don't have the file and the airdate is in the past
            else:
                if self.status == UNAIRED:
                    if self.season > 0:
                        self.status = WANTED
                    else:
                        # if it's a special then just skip it
                        self.status = SKIPPED

                # if we somehow are still UNKNOWN then just skip it
                elif self.status == UNKNOWN:
                    self.status = SKIPPED

                else:
                    logger.log(u"Not touching status because we have no ep file, the airdate is in the past, and the status is " + str(self.status), logger.DEBUG)

        # if we have a media file then it's downloaded
        elif sickbeard.helpers.isMediaFile(self.location):
            # leave propers alone, you have to either post-process them or manually change them back
            if self.status not in Quality.SNATCHED_PROPER + Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED]:
                logger.log(u"5 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)), logger.DEBUG)
                self.status = Quality.statusFromName(self.location)

        # shouldn't get here probably
        else:
            logger.log(u"6 Status changes from " + str(self.status) + " to " + str(UNKNOWN), logger.DEBUG)
            self.status = UNKNOWN

        # hasnfo, hastbn, status?
def loadFromNFO(self, location): if not ek.ek(os.path.isdir, self.show._location): logger.log(str(self.show.tvdbid) + u": The show dir is missing, not bothering to try loading the episode NFO") return logger.log(str(self.show.tvdbid) + u": Loading episode details from the NFO file associated with " + location, logger.DEBUG) self.location = location if self.location != "": if self.status == UNKNOWN: if sickbeard.helpers.isMediaFile(self.location): logger.log(u"7 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)), logger.DEBUG) self.status = Quality.statusFromName(self.location) nfoFile = sickbeard.helpers.replaceExtension(self.location, "nfo") logger.log(str(self.show.tvdbid) + u": Using NFO name " + nfoFile, logger.DEBUG) if ek.ek(os.path.isfile, nfoFile): try: showXML = etree.ElementTree(file=nfoFile) except (SyntaxError, ValueError), e: logger.log(u"Error loading the NFO, backing up the NFO and skipping for now: " + ex(e), logger.ERROR) # TODO: figure out what's wrong and fix it try: ek.ek(os.rename, nfoFile, nfoFile + ".old") except Exception, e: logger.log(u"Failed to rename your episode's NFO file - you need to delete it or fix it: " + ex(e), logger.ERROR) raise exceptions.NoNFOException("Error in NFO format") for epDetails in showXML.getiterator('episodedetails'): if epDetails.findtext('season') is None or int(epDetails.findtext('season')) != self.season or \ epDetails.findtext('episode') is None or int(epDetails.findtext('episode')) != self.episode: logger.log(str(self.show.tvdbid) + u": NFO has an <episodedetails> block for a different episode - wanted " + str(self.season) + "x" + str(self.episode) + " but got " + str(epDetails.findtext('season')) + "x" + str(epDetails.findtext('episode')), logger.DEBUG) continue if epDetails.findtext('title') is None or epDetails.findtext('aired') is None: raise exceptions.NoNFOException("Error in NFO format (missing episode title or airdate)") self.name = 
epDetails.findtext('title') self.episode = int(epDetails.findtext('episode')) self.season = int(epDetails.findtext('season')) self.description = epDetails.findtext('plot') if self.description is None: self.description = "" if epDetails.findtext('aired'): rawAirdate = [int(x) for x in epDetails.findtext('aired').split("-")] self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2]) else: self.airdate = datetime.date.fromordinal(1) self.hasnfo = True else: self.hasnfo = False if ek.ek(os.path.isfile, sickbeard.helpers.replaceExtension(nfoFile, "tbn")): self.hastbn = True else: self.hastbn = False def __str__(self): toReturn = "" toReturn += str(self.show.name) + " - " + str(self.season) + "x" + str(self.episode) + " - " + str(self.name) + "\n" toReturn += "location: " + str(self.location) + "\n" toReturn += "description: " + str(self.description) + "\n" toReturn += "airdate: " + str(self.airdate.toordinal()) + " (" + str(self.airdate) + ")\n" toReturn += "hasnfo: " + str(self.hasnfo) + "\n" toReturn += "hastbn: " + str(self.hastbn) + "\n" toReturn += "status: " + str(self.status) + "\n" return toReturn def createMetaFiles(self, force=False): if not ek.ek(os.path.isdir, self.show._location): logger.log(str(self.show.tvdbid) + u": The show dir is missing, not bothering to try to create metadata") return self.createNFO(force) self.createThumbnail(force) if self.checkForMetaFiles(): self.saveToDB() def createNFO(self, force=False): result = False for cur_provider in sickbeard.metadata_provider_dict.values(): result = cur_provider.create_episode_metadata(self) or result return result def createThumbnail(self, force=False): result = False for cur_provider in sickbeard.metadata_provider_dict.values(): result = cur_provider.create_episode_thumb(self) or result return result def deleteEpisode(self): logger.log(u"Deleting " + self.show.name + " " + str(self.season) + "x" + str(self.episode) + " from the DB", logger.DEBUG) # remove myself from the show dictionary 
if self.show.getEpisode(self.season, self.episode, noCreate=True) == self: logger.log(u"Removing myself from my show's list", logger.DEBUG) del self.show.episodes[self.season][self.episode] # delete myself from the DB logger.log(u"Deleting myself from the database", logger.DEBUG) myDB = db.DBConnection() sql = "DELETE FROM tv_episodes WHERE showid=" + str(self.show.tvdbid) + " AND season=" + str(self.season) + " AND episode=" + str(self.episode) myDB.action(sql) raise exceptions.EpisodeDeletedException() def saveToDB(self, forceSave=False): """ Saves this episode to the database if any of its data has been changed since the last save. forceSave: If True it will save to the database even if no data has been changed since the last save (aka if the record is not dirty). """ if not self.dirty and not forceSave: logger.log(str(self.show.tvdbid) + u": Not saving episode to db - record is not dirty", logger.DEBUG) return logger.log(str(self.show.tvdbid) + u": Saving episode details to database", logger.DEBUG) logger.log(u"STATUS IS " + str(self.status), logger.DEBUG) myDB = db.DBConnection() newValueDict = {"tvdbid": self.tvdbid, "name": self.name, "description": self.description, "airdate": self.airdate.toordinal(), "hasnfo": self.hasnfo, "hastbn": self.hastbn, "status": self.status, "location": self.location, "file_size": self.file_size, "release_name": self.release_name} controlValueDict = {"showid": self.show.tvdbid, "season": self.season, "episode": self.episode} # use a custom update/insert method to get the data into the DB myDB.upsert("tv_episodes", newValueDict, controlValueDict) def fullPath(self): if self.location is None or self.location == "": return None else: return ek.ek(os.path.join, self.show.location, self.location) def prettyName(self): """ Returns the name of this episode in a "pretty" human-readable format. Used for logging and notifications and such. 
Returns: A string representing the episode's name and season/ep numbers """ return self._format_pattern('%SN - %Sx%0E - %EN') def _ep_name(self): """ Returns the name of the episode to use during renaming. Combines the names of related episodes. Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name" "Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name" """ multiNameRegex = "(.*) \(\d{1,2}\)" self.relatedEps = sorted(self.relatedEps, key=lambda x: x.episode) if len(self.relatedEps) == 0: goodName = self.name else: goodName = '' singleName = True curGoodName = None for curName in [self.name] + [x.name for x in self.relatedEps]: match = re.match(multiNameRegex, curName) if not match: singleName = False break if curGoodName is None: curGoodName = match.group(1) elif curGoodName != match.group(1): singleName = False break if singleName: goodName = curGoodName else: goodName = self.name for relEp in self.relatedEps: goodName += " & " + relEp.name return goodName def _replace_map(self): """ Generates a replacement map for this episode which maps all possible custom naming patterns to the correct value for this episode. Returns: A dict with patterns as the keys and their replacement values as the values. 
""" ep_name = self._ep_name() def dot(name): return helpers.sanitizeSceneName(name) def us(name): return re.sub('[ -]', '_', name) def release_name(name): if name: name = helpers.remove_non_release_groups(helpers.remove_extension(name)) return name def release_group(name): if name: name = helpers.remove_non_release_groups(helpers.remove_extension(name)) else: return "" np = NameParser(False) try: parse_result = np.parse(name) except InvalidNameException, e: logger.log(u"Unable to get parse release_group: " + ex(e), logger.DEBUG) return "" if not parse_result.release_group: return "" return parse_result.release_group epStatus, epQual = Quality.splitCompositeStatus(self.status) # @UnusedVariable return { '%SN': self.show.name, '%S.N': dot(self.show.name), '%S_N': us(self.show.name), '%EN': ep_name, '%E.N': dot(ep_name), '%E_N': us(ep_name), '%QN': Quality.qualityStrings[epQual], '%Q.N': dot(Quality.qualityStrings[epQual]), '%Q_N': us(Quality.qualityStrings[epQual]), '%S': str(self.season), '%0S': '%02d' % self.season, '%E': str(self.episode), '%0E': '%02d' % self.episode, '%RN': release_name(self.release_name), '%RG': release_group(self.release_name), '%AD': str(self.airdate).replace('-', ' '), '%A.D': str(self.airdate).replace('-', '.'), '%A_D': us(str(self.airdate)), '%A-D': str(self.airdate), '%Y': str(self.airdate.year), '%M': str(self.airdate.month), '%D': str(self.airdate.day), '%0M': '%02d' % self.airdate.month, '%0D': '%02d' % self.airdate.day, } def _format_string(self, pattern, replace_map): """ Replaces all template strings with the correct value """ result_name = pattern # do the replacements for cur_replacement in sorted(replace_map.keys(), reverse=True): result_name = result_name.replace(cur_replacement, helpers.sanitizeFileName(replace_map[cur_replacement])) result_name = result_name.replace(cur_replacement.lower(), helpers.sanitizeFileName(replace_map[cur_replacement].lower())) return result_name def _format_pattern(self, pattern=None, multi=None, 
debug=False): """ Manipulates an episode naming pattern and then fills the template in """ if pattern is None: pattern = sickbeard.NAMING_PATTERN if multi is None: multi = sickbeard.NAMING_MULTI_EP replace_map = self._replace_map() result_name = pattern # if there's no release group then replace it with a reasonable facsimile if not replace_map['%RN']: if self.show.air_by_date: result_name = result_name.replace('%RN', '%S.N.%A.D.%E.N-SiCKBEARD') result_name = result_name.replace('%rn', '%s.n.%A.D.%e.n-sickbeard') else: result_name = result_name.replace('%RN', '%S.N.S%0SE%0E.%E.N-SiCKBEARD') result_name = result_name.replace('%rn', '%s.n.s%0se%0e.%e.n-sickbeard') if debug: logger.log(u"Episode has no release name, replacing it with a generic one: " + result_name, logger.DEBUG) if not replace_map['%RG']: result_name = result_name.replace('%RG', 'SiCKBEARD') result_name = result_name.replace('%rg', 'sickbeard') if debug: logger.log(u"Episode has no release group, replacing it with a generic one: " + result_name, logger.DEBUG) # split off ep name part only name_groups = re.split(r'[\\/]', result_name) # figure out the double-ep numbering style for each group, if applicable for cur_name_group in name_groups: season_format = sep = ep_sep = ep_format = None season_ep_regex = ''' (?P<pre_sep>[ _.-]*) ((?:s(?:eason|eries)?\s*)?%0?S(?![._]?N)) (.*?) 
(%0?E(?![._]?N)) (?P<post_sep>[ _.-]*) ''' ep_only_regex = '(E?%0?E(?![._]?N))' # try the normal way season_ep_match = re.search(season_ep_regex, cur_name_group, re.I | re.X) ep_only_match = re.search(ep_only_regex, cur_name_group, re.I | re.X) # if we have a season and episode then collect the necessary data if season_ep_match: season_format = season_ep_match.group(2) ep_sep = season_ep_match.group(3) ep_format = season_ep_match.group(4) sep = season_ep_match.group('pre_sep') if not sep: sep = season_ep_match.group('post_sep') if not sep: sep = ' ' # force 2-3-4 format if they chose to extend if multi in (NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED): ep_sep = '-' regex_used = season_ep_regex # if there's no season then there's not much choice so we'll just force them to use 03-04-05 style elif ep_only_match: season_format = '' ep_sep = '-' ep_format = ep_only_match.group(1) sep = '' regex_used = ep_only_regex else: continue # we need at least this much info to continue if not ep_sep or not ep_format: continue # start with the ep string, eg. 
E03 ep_string = self._format_string(ep_format.upper(), replace_map) for other_ep in self.relatedEps: # for limited extend we only append the last ep if multi in (NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED) and other_ep != self.relatedEps[-1]: continue elif multi == NAMING_DUPLICATE: # add " - S01" ep_string += sep + season_format elif multi == NAMING_SEPARATED_REPEAT: ep_string += sep # add "E04" ep_string += ep_sep if multi == NAMING_LIMITED_EXTEND_E_PREFIXED: ep_string += 'E' ep_string += other_ep._format_string(ep_format.upper(), other_ep._replace_map()) if season_ep_match: regex_replacement = r'\g<pre_sep>\g<2>\g<3>' + ep_string + r'\g<post_sep>' elif ep_only_match: regex_replacement = ep_string # fill out the template for this piece and then insert this piece into the actual pattern cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group) # cur_name_group_result = cur_name_group.replace(ep_format, ep_string) # logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" from "+cur_name_group, logger.DEBUG) result_name = result_name.replace(cur_name_group, cur_name_group_result) result_name = self._format_string(result_name, replace_map) if debug: logger.log(u"formatting pattern: " + pattern + " -> " + result_name, logger.DEBUG) return result_name def proper_path(self): """ Figures out the path where this episode SHOULD live according to the renaming rules, relative from the show dir """ result = self.formatted_filename() # if they want us to flatten it and we're allowed to flatten it then we will if self.show.flatten_folders and not sickbeard.NAMING_FORCE_FOLDERS: return result # if not we append the folder on and use that else: result = ek.ek(os.path.join, self.formatted_dir(), result) return result def formatted_dir(self, pattern=None, multi=None, debug=False): """ Just the folder name of the episode """ if pattern is 
None: # we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps: pattern = sickbeard.NAMING_ABD_PATTERN else: pattern = sickbeard.NAMING_PATTERN # split off the dirs only, if they exist name_groups = re.split(r'[\\/]', pattern) if len(name_groups) == 1: return '' else: return self._format_pattern(os.sep.join(name_groups[:-1]), multi, debug) def formatted_filename(self, pattern=None, multi=None, debug=False): """ Just the filename of the episode, formatted based on the naming settings """ if pattern is None: # we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps: pattern = sickbeard.NAMING_ABD_PATTERN else: pattern = sickbeard.NAMING_PATTERN # split off the filename only, if they exist name_groups = re.split(r'[\\/]', pattern) return self._format_pattern(name_groups[-1], multi, debug) def rename(self): """ Renames an episode file and all related files to the location and filename as specified in the naming settings. 
""" if not ek.ek(os.path.isfile, self.location): logger.log(u"Can't perform rename on " + self.location + " when it doesn't exist, skipping", logger.WARNING) return proper_path = self.proper_path() absolute_proper_path = ek.ek(os.path.join, self.show.location, proper_path) absolute_current_path_no_ext, file_ext = ek.ek(os.path.splitext, self.location) absolute_current_path_no_ext_length = len(absolute_current_path_no_ext) current_path = absolute_current_path_no_ext if absolute_current_path_no_ext.startswith(self.show.location): current_path = absolute_current_path_no_ext[len(self.show.location):] logger.log(u"Renaming/moving episode from the base path " + self.location + " to " + absolute_proper_path, logger.DEBUG) # if it's already named correctly then don't do anything if proper_path == current_path: logger.log(str(self.tvdbid) + u": File " + self.location + " is already named correctly, skipping", logger.DEBUG) return related_files = helpers.list_associated_files(self.location, base_name_only=True) logger.log(u"Files associated to " + self.location + ": " + str(related_files), logger.DEBUG) # move the ep file result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length) # move related files for cur_related_file in related_files: cur_result = helpers.rename_ep_file(cur_related_file, absolute_proper_path, absolute_current_path_no_ext_length) if cur_result is False: logger.log(str(self.tvdbid) + u": Unable to rename file " + cur_related_file, logger.ERROR) # save the ep with self.lock: if result is not False: self.location = absolute_proper_path + file_ext for relEp in self.relatedEps: relEp.location = absolute_proper_path + file_ext # in case something changed with the metadata just do a quick check for curEp in [self] + self.relatedEps: curEp.checkForMetaFiles() # save any changes to the database with self.lock: self.saveToDB() for relEp in self.relatedEps: relEp.saveToDB()
75,655
Python
.py
1,327
42.45893
285
0.582623
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,316
naming.py
midgetspy_Sick-Beard/sickbeard/naming.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import os import sickbeard from sickbeard import encodingKludge as ek from sickbeard import tv from sickbeard import common from sickbeard import logger from sickbeard.name_parser.parser import NameParser, InvalidNameException from common import Quality, DOWNLOADED name_presets = ('%SN - %Sx%0E - %EN', '%S.N.S%0SE%0E.%E.N', '%Sx%0E - %EN', 'S%0SE%0E - %EN', 'Season %0S/%S.N.S%0SE%0E.%Q.N-%RG' ) name_abd_presets = ('%SN - %A-D - %EN', '%S.N.%A.D.%E.N.%Q.N', '%Y/%0M/%S.N.%A.D.%E.N-%RG' ) class TVShow(): def __init__(self): self.name = "Show Name" self.genre = "Comedy" self.air_by_date = 0 class TVEpisode(tv.TVEpisode): def __init__(self, season, episode, name): self.relatedEps = [] self._name = name self._season = season self._episode = episode self._airdate = datetime.date(2010, 3, 9) self.show = TVShow() self._status = Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV) self._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP' def check_force_season_folders(pattern=None, multi=None): """ Checks if the name can still be parsed if you strip off the folders to determine if we need to force season folders to be enabled or not. Returns true if season folders need to be forced on or false otherwise. 
""" if pattern is None: pattern = sickbeard.NAMING_PATTERN valid = not validate_name(pattern, None, file_only=True) if multi is not None: valid = valid or not validate_name(pattern, multi, file_only=True) return valid def check_valid_naming(pattern=None, multi=None): """ Checks if the name is can be parsed back to its original form for both single and multi episodes. Returns true if the naming is valid, false if not. """ if pattern is None: pattern = sickbeard.NAMING_PATTERN logger.log(u"Checking whether the pattern " + pattern + " is valid for a single episode", logger.DEBUG) valid = validate_name(pattern, None) if multi is not None: logger.log(u"Checking whether the pattern " + pattern + " is valid for a multi episode", logger.DEBUG) valid = valid and validate_name(pattern, multi) return valid def check_valid_abd_naming(pattern=None): """ Checks if the name is can be parsed back to its original form for an air-by-date format. Returns true if the naming is valid, false if not. """ if pattern is None: pattern = sickbeard.NAMING_PATTERN logger.log(u"Checking whether the pattern " + pattern + " is valid for an air-by-date episode", logger.DEBUG) valid = validate_name(pattern, abd=True) return valid def validate_name(pattern, multi=None, file_only=False, abd=False): ep = _generate_sample_ep(multi, abd) parser = NameParser(True) new_name = ep.formatted_filename(pattern, multi, debug=True) + '.ext' new_path = ep.formatted_dir(pattern, multi, debug=True) if not file_only: new_name = ek.ek(os.path.join, new_path, new_name) if not new_name: logger.log(u"Unable to create a name out of " + pattern, logger.DEBUG) return False logger.log(u"Trying to parse " + new_name, logger.DEBUG) try: result = parser.parse(new_name) except InvalidNameException: logger.log(u"Unable to parse " + new_name + ", not valid", logger.DEBUG) return False logger.log(u"The name " + new_name + " parsed into " + str(result), logger.DEBUG) if abd: if result.air_date != ep.airdate: logger.log(u"Air date 
incorrect in parsed episode, pattern isn't valid", logger.DEBUG) return False else: if result.season_number != ep.season: logger.log(u"Season incorrect in parsed episode, pattern isn't valid", logger.DEBUG) return False if result.episode_numbers != [x.episode for x in [ep] + ep.relatedEps]: logger.log(u"Episode incorrect in parsed episode, pattern isn't valid", logger.DEBUG) return False return True def _generate_sample_ep(multi=None, abd=False): # make a fake episode object ep = TVEpisode(2, 3, "Ep Name") ep._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV) ep._airdate = datetime.date(2011, 3, 9) if abd: ep._release_name = 'Show.Name.2011.03.09.HDTV.XviD-RLSGROUP' else: ep._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP' if multi is not None: ep._name = "Ep Name (1)" ep._release_name = 'Show.Name.S02E03E04E05.HDTV.XviD-RLSGROUP' secondEp = TVEpisode(2, 4, "Ep Name (2)") secondEp._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV) secondEp._release_name = ep._release_name thirdEp = TVEpisode(2, 5, "Ep Name (3)") thirdEp._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV) thirdEp._release_name = ep._release_name ep.relatedEps.append(secondEp) ep.relatedEps.append(thirdEp) return ep def test_name(pattern, multi=None, abd=False): ep = _generate_sample_ep(multi, abd) return {'name': ep.formatted_filename(pattern, multi, debug=True), 'dir': ep.formatted_dir(pattern, multi, debug=True)}
6,377
Python
.py
139
38.021583
124
0.65332
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,317
show_queue.py
midgetspy_Sick-Beard/sickbeard/show_queue.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import traceback import sickbeard from lib.tvdb_api import tvdb_exceptions, tvdb_api from sickbeard.common import SKIPPED, WANTED from sickbeard.tv import TVShow from sickbeard import exceptions, logger, ui, db from sickbeard import generic_queue from sickbeard import name_cache from sickbeard.exceptions import ex class ShowQueue(generic_queue.GenericQueue): def __init__(self): generic_queue.GenericQueue.__init__(self) self.queue_name = "SHOWQUEUE" def _isInQueue(self, show, actions): return show in [x.show for x in self.queue if x.action_id in actions] def _isBeingSomethinged(self, show, actions): return self.currentItem is not None and show == self.currentItem.show and \ self.currentItem.action_id in actions def isInUpdateQueue(self, show): return self._isInQueue(show, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE)) def isInRefreshQueue(self, show): return self._isInQueue(show, (ShowQueueActions.REFRESH,)) def isInRenameQueue(self, show): return self._isInQueue(show, (ShowQueueActions.RENAME,)) def isBeingAdded(self, show): return self._isBeingSomethinged(show, (ShowQueueActions.ADD,)) def isBeingUpdated(self, show): return self._isBeingSomethinged(show, (ShowQueueActions.UPDATE, 
ShowQueueActions.FORCEUPDATE)) def isBeingRefreshed(self, show): return self._isBeingSomethinged(show, (ShowQueueActions.REFRESH,)) def isBeingRenamed(self, show): return self._isBeingSomethinged(show, (ShowQueueActions.RENAME,)) def _getLoadingShowList(self): return [x for x in self.queue + [self.currentItem] if x is not None and x.isLoading] loadingShowList = property(_getLoadingShowList) def updateShow(self, show, force=False): if self.isBeingAdded(show): raise exceptions.CantUpdateException("Show is still being added, wait until it is finished before you update.") if self.isBeingUpdated(show): raise exceptions.CantUpdateException("This show is already being updated, can't update again until it's done.") if self.isInUpdateQueue(show): raise exceptions.CantUpdateException("This show is already being updated, can't update again until it's done.") if not force: queueItemObj = QueueItemUpdate(show) else: queueItemObj = QueueItemForceUpdate(show) self.add_item(queueItemObj) return queueItemObj def refreshShow(self, show, force=False): if self.isBeingRefreshed(show) and not force: raise exceptions.CantRefreshException("This show is already being refreshed, not refreshing again.") if (self.isBeingUpdated(show) or self.isInUpdateQueue(show)) and not force: logger.log(u"A refresh was attempted but there is already an update queued or in progress. 
Since updates do a refresh at the end anyway I'm skipping this request.", logger.DEBUG) return queueItemObj = QueueItemRefresh(show) self.add_item(queueItemObj) return queueItemObj def renameShowEpisodes(self, show, force=False): queueItemObj = QueueItemRename(show) self.add_item(queueItemObj) return queueItemObj def addShow(self, tvdb_id, showDir, default_status=None, quality=None, flatten_folders=None, lang="en"): queueItemObj = QueueItemAdd(tvdb_id, showDir, default_status, quality, flatten_folders, lang) self.add_item(queueItemObj) return queueItemObj class ShowQueueActions: REFRESH = 1 ADD = 2 UPDATE = 3 FORCEUPDATE = 4 RENAME = 5 names = {REFRESH: 'Refresh', ADD: 'Add', UPDATE: 'Update', FORCEUPDATE: 'Force Update', RENAME: 'Rename', } class ShowQueueItem(generic_queue.QueueItem): """ Represents an item in the queue waiting to be executed Can be either: - show being added (may or may not be associated with a show object) - show being refreshed - show being updated - show being force updated """ def __init__(self, action_id, show): generic_queue.QueueItem.__init__(self, ShowQueueActions.names[action_id], action_id) self.show = show def isInQueue(self): return self in sickbeard.showQueueScheduler.action.queue + [sickbeard.showQueueScheduler.action.currentItem] # @UndefinedVariable def _getName(self): return str(self.show.tvdbid) def _isLoading(self): return False show_name = property(_getName) isLoading = property(_isLoading) class QueueItemAdd(ShowQueueItem): def __init__(self, tvdb_id, showDir, default_status, quality, flatten_folders, lang): self.tvdb_id = tvdb_id self.showDir = showDir self.default_status = default_status self.quality = quality self.flatten_folders = flatten_folders self.lang = lang self.show = None # this will initialize self.show to None ShowQueueItem.__init__(self, ShowQueueActions.ADD, self.show) def _getName(self): """ Returns the show name if there is a show object created, if not returns the dir that the show is being added to. 
""" if self.show is None: return self.showDir return self.show.name show_name = property(_getName) def _isLoading(self): """ Returns True if we've gotten far enough to have a show object, or False if we still only know the folder name. """ if self.show is None: return True return False isLoading = property(_isLoading) def execute(self): ShowQueueItem.execute(self) logger.log(u"Starting to add show " + self.showDir) try: # make sure the tvdb ids are valid try: ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if self.lang: ltvdb_api_parms['language'] = self.lang logger.log(u"TVDB: " + repr(ltvdb_api_parms)) t = tvdb_api.Tvdb(**ltvdb_api_parms) s = t[self.tvdb_id] # this usually only happens if they have an NFO in their show dir which gave us a TVDB ID that has no proper english version of the show if not s['seriesname']: logger.log(u"Show in " + self.showDir + " has no name on TVDB, probably the wrong language used to search with.", logger.ERROR) ui.notifications.error("Unable to add show", "Show in " + self.showDir + " has no name on TVDB, probably the wrong language. Delete .nfo and add manually in the correct language.") self._finishEarly() return # if the show has no episodes/seasons if not s: logger.log(u"Show " + str(s['seriesname']) + " is on TVDB but contains no season/episode data.", logger.ERROR) ui.notifications.error("Unable to add show", "Show " + str(s['seriesname']) + " is on TVDB but contains no season/episode data.") self._finishEarly() return except tvdb_exceptions.tvdb_exception, e: logger.log(u"Error contacting TVDB: " + ex(e), logger.ERROR) ui.notifications.error("Unable to add show", "Unable to look up the show in " + self.showDir + " on TVDB, not using the NFO. 
Delete .nfo and add manually in the correct language.") self._finishEarly() return # clear the name cache name_cache.clearCache() newShow = TVShow(self.tvdb_id, self.lang) newShow.loadFromTVDB() self.show = newShow # set up initial values self.show.location = self.showDir self.show.quality = self.quality if self.quality else sickbeard.QUALITY_DEFAULT self.show.flatten_folders = self.flatten_folders if self.flatten_folders is not None else sickbeard.FLATTEN_FOLDERS_DEFAULT self.show.paused = 0 self.show.skip_notices = 0 # be smartish about this if self.show.genre and "talk show" in self.show.genre.lower(): self.show.air_by_date = 1 if self.show.genre and "documentary" in self.show.genre.lower(): self.show.air_by_date = 0 except tvdb_exceptions.tvdb_exception, e: logger.log(u"Unable to add show due to an error with TVDB: " + ex(e), logger.ERROR) if self.show: ui.notifications.error("Unable to add " + str(self.show.name) + " due to an error with TVDB") else: ui.notifications.error("Unable to add show due to an error with TVDB") self._finishEarly() return except exceptions.MultipleShowObjectsException: logger.log(u"The show in " + self.showDir + " is already in your show list, skipping", logger.ERROR) ui.notifications.error('Show skipped', "The show in " + self.showDir + " is already in your show list") self._finishEarly() return except Exception, e: logger.log(u"Error trying to add show: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) self._finishEarly() raise try: self.show.saveToDB() except Exception, e: logger.log(u"Error saving the show to the database: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) self._finishEarly() raise # add it to the show list sickbeard.showList.append(self.show) try: self.show.loadEpisodesFromTVDB() except Exception, e: logger.log(u"Error with TVDB, not creating episode list: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) try: self.show.setTVRID() except 
Exception, e: logger.log(u"Error with TVRage, not setting tvrid" + ex(e), logger.ERROR) try: self.show.loadEpisodesFromDir() except Exception, e: logger.log(u"Error searching dir for episodes: " + ex(e), logger.ERROR) logger.log(traceback.format_exc(), logger.DEBUG) # if they gave a custom status then change all the eps to it if self.default_status != SKIPPED: logger.log(u"Setting all episodes to the specified default status: " + str(self.default_status)) myDB = db.DBConnection() myDB.action("UPDATE tv_episodes SET status = ? WHERE status = ? AND showid = ? AND season > 0", [self.default_status, SKIPPED, self.show.tvdbid]) # if they started with WANTED eps then run the backlog if self.default_status == WANTED: logger.log(u"Launching backlog for this show since its episodes are WANTED") sickbeard.backlogSearchScheduler.action.searchBacklog([self.show]) # @UndefinedVariable self.show.writeMetadata() self.show.populateCache() self.show.flushEpisodes() self.finish() def _finishEarly(self): if self.show is not None: self.show.deleteShow() self.finish() class QueueItemRefresh(ShowQueueItem): def __init__(self, show=None): ShowQueueItem.__init__(self, ShowQueueActions.REFRESH, show) # do refreshes first because they're quick self.priority = generic_queue.QueuePriorities.HIGH def execute(self): ShowQueueItem.execute(self) logger.log(u"Performing refresh on " + self.show.name) self.show.refreshDir() self.show.writeMetadata() self.show.populateCache() self.inProgress = False class QueueItemRename(ShowQueueItem): def __init__(self, show=None): ShowQueueItem.__init__(self, ShowQueueActions.RENAME, show) def execute(self): ShowQueueItem.execute(self) logger.log(u"Performing rename on " + self.show.name) try: show_loc = self.show.location # @UnusedVariable except exceptions.ShowDirNotFoundException: logger.log(u"Can't perform rename on " + self.show.name + " when the show dir is missing.", logger.WARNING) return ep_obj_rename_list = [] ep_obj_list = 
self.show.getAllEpisodes(has_location=True) for cur_ep_obj in ep_obj_list: # Only want to rename if we have a location if cur_ep_obj.location: if cur_ep_obj.relatedEps: # do we have one of multi-episodes in the rename list already have_already = False for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]: if cur_related_ep in ep_obj_rename_list: have_already = True break if not have_already: ep_obj_rename_list.append(cur_ep_obj) else: ep_obj_rename_list.append(cur_ep_obj) for cur_ep_obj in ep_obj_rename_list: cur_ep_obj.rename() self.inProgress = False class QueueItemUpdate(ShowQueueItem): def __init__(self, show=None): ShowQueueItem.__init__(self, ShowQueueActions.UPDATE, show) self.force = False def execute(self): ShowQueueItem.execute(self) logger.log(u"Beginning update of " + self.show.name) logger.log(u"Retrieving show info from TVDB", logger.DEBUG) try: self.show.loadFromTVDB(cache=not self.force) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to contact TVDB, aborting: " + ex(e), logger.WARNING) return except tvdb_exceptions.tvdb_attributenotfound, e: logger.log(u"Data retrieved from TVDB was incomplete, aborting: " + ex(e), logger.ERROR) return # get episode list from DB logger.log(u"Loading all episodes from the database", logger.DEBUG) DBEpList = self.show.loadEpisodesFromDB() # get episode list from TVDB logger.log(u"Loading all episodes from theTVDB", logger.DEBUG) try: TVDBEpList = self.show.loadEpisodesFromTVDB(cache=not self.force) except tvdb_exceptions.tvdb_exception, e: logger.log(u"Unable to get info from TVDB, the show info will not be refreshed: " + ex(e), logger.ERROR) TVDBEpList = None if TVDBEpList is None: logger.log(u"No data returned from TVDB, unable to update this show", logger.ERROR) else: # for each ep we found on TVDB delete it from the DB list for curSeason in TVDBEpList: for curEpisode in TVDBEpList[curSeason]: logger.log(u"Removing " + str(curSeason) + "x" + str(curEpisode) + " from the DB list", logger.DEBUG) if 
curSeason in DBEpList and curEpisode in DBEpList[curSeason]: del DBEpList[curSeason][curEpisode] # for the remaining episodes in the DB list just delete them from the DB for curSeason in DBEpList: for curEpisode in DBEpList[curSeason]: logger.log(u"Permanently deleting episode " + str(curSeason) + "x" + str(curEpisode) + " from the database", logger.MESSAGE) curEp = self.show.getEpisode(curSeason, curEpisode) try: curEp.deleteEpisode() except exceptions.EpisodeDeletedException: pass # now that we've updated the DB from TVDB see if there's anything we can add from TVRage with self.show.lock: logger.log(u"Attempting to supplement show info with info from TVRage", logger.DEBUG) self.show.loadLatestFromTVRage() if self.show.tvrid == 0: self.show.setTVRID() sickbeard.showQueueScheduler.action.refreshShow(self.show, True) # @UndefinedVariable class QueueItemForceUpdate(QueueItemUpdate): def __init__(self, show=None): ShowQueueItem.__init__(self, ShowQueueActions.FORCEUPDATE, show) self.force = True
17,794
Python
.py
344
39.80814
201
0.627697
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,318
autoPostProcesser.py
midgetspy_Sick-Beard/sickbeard/autoPostProcesser.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import os.path import sickbeard from sickbeard import logger from sickbeard import encodingKludge as ek from sickbeard import processTV class PostProcesser(): def run(self): if not sickbeard.PROCESS_AUTOMATICALLY: return if not ek.ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR): logger.log(u"Automatic post-processing attempted but dir " + sickbeard.TV_DOWNLOAD_DIR + " doesn't exist", logger.ERROR) return if not ek.ek(os.path.isabs, sickbeard.TV_DOWNLOAD_DIR): logger.log(u"Automatic post-processing attempted but dir " + sickbeard.TV_DOWNLOAD_DIR + " is relative (and probably not what you really want to process)", logger.ERROR) return processTV.processDir(sickbeard.TV_DOWNLOAD_DIR, method='Automatic')
1,597
Python
.py
33
43.060606
181
0.725275
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,319
__init__.py
midgetspy_Sick-Beard/sickbeard/__init__.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import cherrypy import datetime import os import re import socket import sqlite3 import subprocess import sys import urllib import webbrowser from threading import Lock # apparently py2exe won't build these unless they're imported somewhere from sickbeard import providers, metadata from providers import ezrss, tvtorrents, torrentleech, btn, newznab, womble, omgwtfnzbs, hdbits from sickbeard.config import CheckSection, check_setting_int, check_setting_str, ConfigMigrator from sickbeard import searchCurrent, searchBacklog, showUpdater, versionChecker, properFinder, autoPostProcesser from sickbeard import helpers, db, exceptions, show_queue, search_queue, scheduler from sickbeard import logger from sickbeard import naming from common import SD, SKIPPED, NAMING_REPEAT from sickbeard.databases import mainDB, cache_db from lib.configobj import ConfigObj invoked_command = None SOCKET_TIMEOUT = 30 PID = None CFG = None CONFIG_FILE = None # this is the version of the config we EXPECT to find CONFIG_VERSION = 6 PROG_DIR = '.' 
MY_FULLNAME = None MY_NAME = None MY_ARGS = [] SYS_ENCODING = '' DATA_DIR = '' CREATEPID = False PIDFILE = '' DAEMON = None NO_RESIZE = False backlogSearchScheduler = None currentSearchScheduler = None showUpdateScheduler = None versionCheckScheduler = None showQueueScheduler = None searchQueueScheduler = None properFinderScheduler = None autoPostProcesserScheduler = None showList = None loadingShowList = None providerList = [] newznabProviderList = [] metadata_provider_dict = {} NEWEST_VERSION = None NEWEST_VERSION_STRING = None VERSION_NOTIFY = None INIT_LOCK = Lock() __INITIALIZED__ = False started = False ACTUAL_LOG_DIR = None LOG_DIR = None WEB_PORT = None WEB_LOG = None WEB_ROOT = None WEB_USERNAME = None WEB_PASSWORD = None WEB_HOST = None WEB_IPV6 = None ANON_REDIRECT = None DISPLAY_ALL_SEASONS = None USE_API = False API_KEY = None ENABLE_HTTPS = False HTTPS_CERT = None HTTPS_KEY = None LAUNCH_BROWSER = None CACHE_DIR = None ACTUAL_CACHE_DIR = None ROOT_DIRS = None USE_LISTVIEW = None METADATA_XBMC = None METADATA_XBMC_12PLUS = None METADATA_MEDIABROWSER = None METADATA_PS3 = None METADATA_WDTV = None METADATA_TIVO = None METADATA_MEDE8ER = None QUALITY_DEFAULT = None STATUS_DEFAULT = None FLATTEN_FOLDERS_DEFAULT = None PROVIDER_ORDER = [] NAMING_MULTI_EP = None NAMING_PATTERN = None NAMING_ABD_PATTERN = None NAMING_CUSTOM_ABD = None NAMING_FORCE_FOLDERS = False TVDB_API_KEY = '9DAF49C96CBF8DAC' TVDB_BASE_URL = None TVDB_API_PARMS = {} USE_NZBS = None USE_TORRENTS = None NZB_METHOD = None NZB_DIR = None USENET_RETENTION = None DOWNLOAD_PROPERS = None SEARCH_FREQUENCY = None BACKLOG_SEARCH_FREQUENCY = 21 MIN_SEARCH_FREQUENCY = 10 DEFAULT_SEARCH_FREQUENCY = 40 POSTPROCESS_FREQUENCY = None MIN_POSTPROCESS_FREQUENCY = 5 DEFAULT_POSTPROCESS_FREQUENCY = 10 EZRSS = False HDBITS = False HDBITS_USERNAME = None HDBITS_PASSKEY = None TVTORRENTS = False TVTORRENTS_DIGEST = None TVTORRENTS_HASH = None TORRENTLEECH = False TORRENTLEECH_KEY = None BTN = False BTN_API_KEY 
= None NEWZNAB_DATA = None TORRENT_DIR = None ADD_SHOWS_WO_DIR = None CREATE_MISSING_SHOW_DIRS = None RENAME_EPISODES = False PROCESS_AUTOMATICALLY = False KEEP_PROCESSED_DIR = False MOVE_ASSOCIATED_FILES = False FILTER_ASSOCIATED_FILES = None TV_DOWNLOAD_DIR = None NZBS = False NZBS_UID = None NZBS_HASH = None WOMBLE = False OMGWTFNZBS = False OMGWTFNZBS_USERNAME = None OMGWTFNZBS_APIKEY = None SAB_USERNAME = None SAB_PASSWORD = None SAB_APIKEY = None SAB_CATEGORY = None SAB_HOST = '' NZBGET_USERNAME = None NZBGET_PASSWORD = None NZBGET_CATEGORY = None NZBGET_HOST = None USE_XBMC = False XBMC_ALWAYS_ON = True XBMC_NOTIFY_ONSNATCH = False XBMC_NOTIFY_ONDOWNLOAD = False XBMC_UPDATE_LIBRARY = False XBMC_UPDATE_FULL = False XBMC_UPDATE_ONLYFIRST = False XBMC_HOST = '' XBMC_USERNAME = None XBMC_PASSWORD = None USE_PLEX = False PLEX_NOTIFY_ONSNATCH = False PLEX_NOTIFY_ONDOWNLOAD = False PLEX_UPDATE_LIBRARY = False PLEX_SERVER_HOST = None PLEX_HOST = None PLEX_USERNAME = None PLEX_PASSWORD = None USE_GROWL = False GROWL_NOTIFY_ONSNATCH = False GROWL_NOTIFY_ONDOWNLOAD = False GROWL_HOST = '' GROWL_PASSWORD = None USE_PROWL = False PROWL_NOTIFY_ONSNATCH = False PROWL_NOTIFY_ONDOWNLOAD = False PROWL_API = None PROWL_PRIORITY = 0 USE_TWITTER = False TWITTER_NOTIFY_ONSNATCH = False TWITTER_NOTIFY_ONDOWNLOAD = False TWITTER_USERNAME = None TWITTER_PASSWORD = None TWITTER_PREFIX = None USE_BOXCAR2 = False BOXCAR2_NOTIFY_ONSNATCH = False BOXCAR2_NOTIFY_ONDOWNLOAD = False BOXCAR2_ACCESS_TOKEN = None BOXCAR2_SOUND = None USE_PUSHOVER = False PUSHOVER_NOTIFY_ONSNATCH = False PUSHOVER_NOTIFY_ONDOWNLOAD = False PUSHOVER_USERKEY = None PUSHOVER_PRIORITY = 0 PUSHOVER_DEVICE = None PUSHOVER_SOUND = None USE_LIBNOTIFY = False LIBNOTIFY_NOTIFY_ONSNATCH = False LIBNOTIFY_NOTIFY_ONDOWNLOAD = False USE_NMJ = False NMJ_HOST = None NMJ_DATABASE = None NMJ_MOUNT = None USE_SYNOINDEX = False SYNOINDEX_NOTIFY_ONSNATCH = False SYNOINDEX_NOTIFY_ONDOWNLOAD = False SYNOINDEX_UPDATE_LIBRARY = False 
USE_NMJv2 = False NMJv2_HOST = None NMJv2_DATABASE = None NMJv2_DBLOC = None USE_TRAKT = False TRAKT_USERNAME = None TRAKT_PASSWORD = None TRAKT_API = '' USE_PYTIVO = False PYTIVO_NOTIFY_ONSNATCH = False PYTIVO_NOTIFY_ONDOWNLOAD = False PYTIVO_UPDATE_LIBRARY = False PYTIVO_HOST = '' PYTIVO_SHARE_NAME = '' PYTIVO_TIVO_NAME = '' USE_NMA = False NMA_NOTIFY_ONSNATCH = False NMA_NOTIFY_ONDOWNLOAD = False NMA_API = None NMA_PRIORITY = 0 USE_PUSHALOT = False PUSHALOT_NOTIFY_ONSNATCH = False PUSHALOT_NOTIFY_ONDOWNLOAD = False PUSHALOT_AUTHORIZATIONTOKEN = None PUSHALOT_SILENT = False PUSHALOT_IMPORTANT = False USE_PUSHBULLET = False PUSHBULLET_NOTIFY_ONSNATCH = False PUSHBULLET_NOTIFY_ONDOWNLOAD = False PUSHBULLET_ACCESS_TOKEN = None PUSHBULLET_DEVICE_IDEN = None USE_SLACK = False SLACK_NOTIFY_ONSNATCH = False SLACK_NOTIFY_ONDOWNLOAD = False SLACK_ACCESS_TOKEN = None SLACK_CHANNEL = None SLACK_BOT_NAME = None SLACK_ICON_URL = None COMING_EPS_LAYOUT = None COMING_EPS_DISPLAY_PAUSED = None COMING_EPS_SORT = None EXTRA_SCRIPTS = [] GIT_PATH = None IGNORE_WORDS = "german,french,core2hd,dutch,swedish,480p" __INITIALIZED__ = False def get_backlog_cycle_time(): cycletime = SEARCH_FREQUENCY * 2 + 7 return max([cycletime, 720]) def initialize(consoleLogging=True): with INIT_LOCK: global ACTUAL_LOG_DIR, LOG_DIR, WEB_PORT, WEB_LOG, WEB_ROOT, WEB_USERNAME, WEB_PASSWORD, WEB_HOST, WEB_IPV6, USE_API, API_KEY, ENABLE_HTTPS, HTTPS_CERT, HTTPS_KEY, \ USE_NZBS, USE_TORRENTS, NZB_METHOD, NZB_DIR, DOWNLOAD_PROPERS, \ SAB_USERNAME, SAB_PASSWORD, SAB_APIKEY, SAB_CATEGORY, SAB_HOST, \ NZBGET_USERNAME, NZBGET_PASSWORD, NZBGET_CATEGORY, NZBGET_HOST, currentSearchScheduler, backlogSearchScheduler, \ USE_XBMC, XBMC_ALWAYS_ON, XBMC_NOTIFY_ONSNATCH, XBMC_NOTIFY_ONDOWNLOAD, XBMC_UPDATE_FULL, XBMC_UPDATE_ONLYFIRST, \ XBMC_UPDATE_LIBRARY, XBMC_HOST, XBMC_USERNAME, XBMC_PASSWORD, \ USE_TRAKT, TRAKT_USERNAME, TRAKT_PASSWORD, TRAKT_API, \ USE_PLEX, PLEX_NOTIFY_ONSNATCH, PLEX_NOTIFY_ONDOWNLOAD, 
PLEX_UPDATE_LIBRARY, \ PLEX_SERVER_HOST, PLEX_HOST, PLEX_USERNAME, PLEX_PASSWORD, \ showUpdateScheduler, __INITIALIZED__, LAUNCH_BROWSER, showList, loadingShowList, \ NEWZNAB_DATA, NZBS, NZBS_UID, NZBS_HASH, EZRSS, HDBITS, HDBITS_USERNAME, HDBITS_PASSKEY, TVTORRENTS, TVTORRENTS_DIGEST, TVTORRENTS_HASH, BTN, BTN_API_KEY, TORRENTLEECH, TORRENTLEECH_KEY, \ TORRENT_DIR, USENET_RETENTION, SOCKET_TIMEOUT, \ SEARCH_FREQUENCY, DEFAULT_SEARCH_FREQUENCY, BACKLOG_SEARCH_FREQUENCY, \ POSTPROCESS_FREQUENCY, DEFAULT_POSTPROCESS_FREQUENCY, MIN_POSTPROCESS_FREQUENCY, \ QUALITY_DEFAULT, FLATTEN_FOLDERS_DEFAULT, STATUS_DEFAULT, \ GROWL_NOTIFY_ONSNATCH, GROWL_NOTIFY_ONDOWNLOAD, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, \ USE_GROWL, GROWL_HOST, GROWL_PASSWORD, USE_PROWL, PROWL_NOTIFY_ONSNATCH, PROWL_NOTIFY_ONDOWNLOAD, PROWL_API, PROWL_PRIORITY, PROG_DIR, \ USE_PYTIVO, PYTIVO_NOTIFY_ONSNATCH, PYTIVO_NOTIFY_ONDOWNLOAD, PYTIVO_UPDATE_LIBRARY, PYTIVO_HOST, PYTIVO_SHARE_NAME, PYTIVO_TIVO_NAME, \ USE_NMA, NMA_NOTIFY_ONSNATCH, NMA_NOTIFY_ONDOWNLOAD, NMA_API, NMA_PRIORITY, \ USE_PUSHALOT, PUSHALOT_NOTIFY_ONSNATCH, PUSHALOT_NOTIFY_ONDOWNLOAD, PUSHALOT_AUTHORIZATIONTOKEN, PUSHALOT_SILENT, PUSHALOT_IMPORTANT, \ USE_PUSHBULLET, PUSHBULLET_NOTIFY_ONSNATCH, PUSHBULLET_NOTIFY_ONDOWNLOAD, PUSHBULLET_ACCESS_TOKEN, PUSHBULLET_DEVICE_IDEN, \ USE_SLACK, SLACK_NOTIFY_ONSNATCH, SLACK_NOTIFY_ONDOWNLOAD, SLACK_ACCESS_TOKEN, SLACK_CHANNEL, SLACK_BOT_NAME, SLACK_ICON_URL, \ versionCheckScheduler, VERSION_NOTIFY, PROCESS_AUTOMATICALLY, \ KEEP_PROCESSED_DIR, TV_DOWNLOAD_DIR, TVDB_BASE_URL, MIN_SEARCH_FREQUENCY, \ showQueueScheduler, searchQueueScheduler, ROOT_DIRS, CACHE_DIR, ACTUAL_CACHE_DIR, TVDB_API_PARMS, \ NAMING_PATTERN, NAMING_MULTI_EP, NAMING_FORCE_FOLDERS, NAMING_ABD_PATTERN, NAMING_CUSTOM_ABD, \ RENAME_EPISODES, properFinderScheduler, PROVIDER_ORDER, autoPostProcesserScheduler, \ WOMBLE, OMGWTFNZBS, OMGWTFNZBS_USERNAME, OMGWTFNZBS_APIKEY, providerList, newznabProviderList, \ EXTRA_SCRIPTS, 
USE_TWITTER, TWITTER_USERNAME, TWITTER_PASSWORD, TWITTER_PREFIX, \ USE_BOXCAR2, BOXCAR2_ACCESS_TOKEN, BOXCAR2_NOTIFY_ONDOWNLOAD, BOXCAR2_NOTIFY_ONSNATCH, BOXCAR2_SOUND, \ USE_PUSHOVER, PUSHOVER_USERKEY, PUSHOVER_NOTIFY_ONDOWNLOAD, PUSHOVER_NOTIFY_ONSNATCH, PUSHOVER_PRIORITY, PUSHOVER_DEVICE, PUSHOVER_SOUND, \ USE_LIBNOTIFY, LIBNOTIFY_NOTIFY_ONSNATCH, LIBNOTIFY_NOTIFY_ONDOWNLOAD, USE_NMJ, NMJ_HOST, NMJ_DATABASE, NMJ_MOUNT, USE_NMJv2, NMJv2_HOST, NMJv2_DATABASE, NMJv2_DBLOC, \ USE_SYNOINDEX, SYNOINDEX_NOTIFY_ONSNATCH, SYNOINDEX_NOTIFY_ONDOWNLOAD, SYNOINDEX_UPDATE_LIBRARY, \ USE_LISTVIEW, METADATA_XBMC, METADATA_XBMC_12PLUS, METADATA_MEDIABROWSER, METADATA_MEDE8ER, METADATA_PS3, metadata_provider_dict, \ GIT_PATH, MOVE_ASSOCIATED_FILES, FILTER_ASSOCIATED_FILES, \ COMING_EPS_LAYOUT, COMING_EPS_SORT, COMING_EPS_DISPLAY_PAUSED, METADATA_WDTV, METADATA_TIVO, IGNORE_WORDS, CREATE_MISSING_SHOW_DIRS, \ ADD_SHOWS_WO_DIR, ANON_REDIRECT, DISPLAY_ALL_SEASONS if __INITIALIZED__: return False socket.setdefaulttimeout(SOCKET_TIMEOUT) CheckSection(CFG, 'General') ACTUAL_LOG_DIR = check_setting_str(CFG, 'General', 'log_dir', 'Logs') # put the log dir inside the data dir, unless an absolute path LOG_DIR = os.path.normpath(os.path.join(DATA_DIR, ACTUAL_LOG_DIR)) if not helpers.makeDir(LOG_DIR): logger.log(u"!!! 
No log folder, logging to screen only!", logger.ERROR) try: WEB_PORT = check_setting_int(CFG, 'General', 'web_port', 8081) except: WEB_PORT = 8081 if WEB_PORT < 21 or WEB_PORT > 65535: WEB_PORT = 8081 WEB_HOST = check_setting_str(CFG, 'General', 'web_host', '0.0.0.0') WEB_IPV6 = bool(check_setting_int(CFG, 'General', 'web_ipv6', 0)) WEB_ROOT = check_setting_str(CFG, 'General', 'web_root', '').rstrip("/") WEB_LOG = bool(check_setting_int(CFG, 'General', 'web_log', 0)) WEB_USERNAME = check_setting_str(CFG, 'General', 'web_username', '') WEB_PASSWORD = check_setting_str(CFG, 'General', 'web_password', '') LAUNCH_BROWSER = bool(check_setting_int(CFG, 'General', 'launch_browser', 1)) ANON_REDIRECT = check_setting_str(CFG, 'General', 'anon_redirect', 'http://dereferer.org/?') # attempt to help prevent users from breaking links by using a bad url if not ANON_REDIRECT.endswith('?'): ANON_REDIRECT = '' DISPLAY_ALL_SEASONS = check_setting_int(CFG, 'General', 'display_all_seasons', 1) USE_API = bool(check_setting_int(CFG, 'General', 'use_api', 0)) API_KEY = check_setting_str(CFG, 'General', 'api_key', '') ENABLE_HTTPS = bool(check_setting_int(CFG, 'General', 'enable_https', 0)) HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', 'server.crt') HTTPS_KEY = check_setting_str(CFG, 'General', 'https_key', 'server.key') ACTUAL_CACHE_DIR = check_setting_str(CFG, 'General', 'cache_dir', 'cache') # fix bad configs due to buggy code if ACTUAL_CACHE_DIR == 'None': ACTUAL_CACHE_DIR = 'cache' # unless they specify, put the cache dir inside the data dir if not os.path.isabs(ACTUAL_CACHE_DIR): CACHE_DIR = os.path.join(DATA_DIR, ACTUAL_CACHE_DIR) else: CACHE_DIR = ACTUAL_CACHE_DIR if not helpers.makeDir(CACHE_DIR): logger.log(u"!!! 
Creating local cache dir failed, using system default", logger.ERROR) CACHE_DIR = None ROOT_DIRS = check_setting_str(CFG, 'General', 'root_dirs', '') if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', ROOT_DIRS): ROOT_DIRS = '' # Set our common tvdb_api options here TVDB_API_PARMS = {'apikey': TVDB_API_KEY, 'language': 'en', 'useZip': True} if CACHE_DIR: TVDB_API_PARMS['cache'] = os.path.join(CACHE_DIR, 'tvdb') TVDB_BASE_URL = 'http://thetvdb.com/api/' + TVDB_API_KEY QUALITY_DEFAULT = check_setting_int(CFG, 'General', 'quality_default', SD) STATUS_DEFAULT = check_setting_int(CFG, 'General', 'status_default', SKIPPED) VERSION_NOTIFY = check_setting_int(CFG, 'General', 'version_notify', 1) FLATTEN_FOLDERS_DEFAULT = bool(check_setting_int(CFG, 'General', 'flatten_folders_default', 0)) PROVIDER_ORDER = check_setting_str(CFG, 'General', 'provider_order', '').split() NAMING_PATTERN = check_setting_str(CFG, 'General', 'naming_pattern', '%SN - %Sx%0E - %EN') NAMING_ABD_PATTERN = check_setting_str(CFG, 'General', 'naming_abd_pattern', '%SN - %A-D - %EN') NAMING_CUSTOM_ABD = check_setting_int(CFG, 'General', 'naming_custom_abd', 0) NAMING_MULTI_EP = check_setting_int(CFG, 'General', 'naming_multi_ep', 1) NAMING_FORCE_FOLDERS = naming.check_force_season_folders() USE_NZBS = bool(check_setting_int(CFG, 'General', 'use_nzbs', 1)) USE_TORRENTS = bool(check_setting_int(CFG, 'General', 'use_torrents', 0)) NZB_METHOD = check_setting_str(CFG, 'General', 'nzb_method', 'blackhole') if NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'): NZB_METHOD = 'blackhole' DOWNLOAD_PROPERS = bool(check_setting_int(CFG, 'General', 'download_propers', 1)) USENET_RETENTION = check_setting_int(CFG, 'General', 'usenet_retention', 500) SEARCH_FREQUENCY = check_setting_int(CFG, 'General', 'search_frequency', DEFAULT_SEARCH_FREQUENCY) if SEARCH_FREQUENCY < MIN_SEARCH_FREQUENCY: SEARCH_FREQUENCY = MIN_SEARCH_FREQUENCY POSTPROCESS_FREQUENCY = check_setting_int(CFG, 'General', 'postprocess_frequency', 
DEFAULT_POSTPROCESS_FREQUENCY) if POSTPROCESS_FREQUENCY < MIN_POSTPROCESS_FREQUENCY: POSTPROCESS_FREQUENCY = MIN_POSTPROCESS_FREQUENCY TV_DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'tv_download_dir', '') PROCESS_AUTOMATICALLY = check_setting_int(CFG, 'General', 'process_automatically', 0) RENAME_EPISODES = check_setting_int(CFG, 'General', 'rename_episodes', 1) KEEP_PROCESSED_DIR = check_setting_int(CFG, 'General', 'keep_processed_dir', 1) MOVE_ASSOCIATED_FILES = check_setting_int(CFG, 'General', 'move_associated_files', 0) FILTER_ASSOCIATED_FILES = check_setting_str(CFG, 'General', 'filter_associated_files', '') CREATE_MISSING_SHOW_DIRS = check_setting_int(CFG, 'General', 'create_missing_show_dirs', 0) ADD_SHOWS_WO_DIR = check_setting_int(CFG, 'General', 'add_shows_wo_dir', 0) EZRSS = bool(check_setting_int(CFG, 'General', 'use_torrent', 0)) if not EZRSS: CheckSection(CFG, 'EZRSS') EZRSS = bool(check_setting_int(CFG, 'EZRSS', 'ezrss', 0)) GIT_PATH = check_setting_str(CFG, 'General', 'git_path', '') IGNORE_WORDS = check_setting_str(CFG, 'General', 'ignore_words', IGNORE_WORDS) EXTRA_SCRIPTS = [x.strip() for x in check_setting_str(CFG, 'General', 'extra_scripts', '').split('|') if x.strip()] USE_LISTVIEW = bool(check_setting_int(CFG, 'General', 'use_listview', 0)) METADATA_XBMC = check_setting_str(CFG, 'General', 'metadata_xbmc', '0|0|0|0|0|0|0|0|0|0') METADATA_XBMC_12PLUS = check_setting_str(CFG, 'General', 'metadata_xbmc_12plus', '0|0|0|0|0|0|0|0|0|0') METADATA_MEDIABROWSER = check_setting_str(CFG, 'General', 'metadata_mediabrowser', '0|0|0|0|0|0|0|0|0|0') METADATA_PS3 = check_setting_str(CFG, 'General', 'metadata_ps3', '0|0|0|0|0|0|0|0|0|0') METADATA_WDTV = check_setting_str(CFG, 'General', 'metadata_wdtv', '0|0|0|0|0|0|0|0|0|0') METADATA_TIVO = check_setting_str(CFG, 'General', 'metadata_tivo', '0|0|0|0|0|0|0|0|0|0') METADATA_MEDE8ER = check_setting_str(CFG, 'General', 'metadata_mede8er', '0|0|0|0|0|0|0|0|0|0') CheckSection(CFG, 'GUI') COMING_EPS_LAYOUT = 
check_setting_str(CFG, 'GUI', 'coming_eps_layout', 'banner') COMING_EPS_DISPLAY_PAUSED = bool(check_setting_int(CFG, 'GUI', 'coming_eps_display_paused', 0)) COMING_EPS_SORT = check_setting_str(CFG, 'GUI', 'coming_eps_sort', 'date') CheckSection(CFG, 'Newznab') NEWZNAB_DATA = check_setting_str(CFG, 'Newznab', 'newznab_data', '') CheckSection(CFG, 'Blackhole') NZB_DIR = check_setting_str(CFG, 'Blackhole', 'nzb_dir', '') TORRENT_DIR = check_setting_str(CFG, 'Blackhole', 'torrent_dir', '') CheckSection(CFG, 'HDBITS') HDBITS = bool(check_setting_int(CFG, 'HDBITS', 'hdbits', 0)) HDBITS_USERNAME = check_setting_str(CFG, 'HDBITS', 'hdbits_username', '') HDBITS_PASSKEY = check_setting_str(CFG, 'HDBITS', 'hdbits_passkey', '') CheckSection(CFG, 'TVTORRENTS') TVTORRENTS = bool(check_setting_int(CFG, 'TVTORRENTS', 'tvtorrents', 0)) TVTORRENTS_DIGEST = check_setting_str(CFG, 'TVTORRENTS', 'tvtorrents_digest', '') TVTORRENTS_HASH = check_setting_str(CFG, 'TVTORRENTS', 'tvtorrents_hash', '') CheckSection(CFG, 'BTN') BTN = bool(check_setting_int(CFG, 'BTN', 'btn', 0)) BTN_API_KEY = check_setting_str(CFG, 'BTN', 'btn_api_key', '') CheckSection(CFG, 'TorrentLeech') TORRENTLEECH = bool(check_setting_int(CFG, 'TorrentLeech', 'torrentleech', 0)) TORRENTLEECH_KEY = check_setting_str(CFG, 'TorrentLeech', 'torrentleech_key', '') CheckSection(CFG, 'NZBs') NZBS = bool(check_setting_int(CFG, 'NZBs', 'nzbs', 0)) NZBS_UID = check_setting_str(CFG, 'NZBs', 'nzbs_uid', '') NZBS_HASH = check_setting_str(CFG, 'NZBs', 'nzbs_hash', '') CheckSection(CFG, 'Womble') WOMBLE = bool(check_setting_int(CFG, 'Womble', 'womble', 1)) CheckSection(CFG, 'omgwtfnzbs') OMGWTFNZBS = bool(check_setting_int(CFG, 'omgwtfnzbs', 'omgwtfnzbs', 0)) OMGWTFNZBS_USERNAME = check_setting_str(CFG, 'omgwtfnzbs', 'omgwtfnzbs_username', '') OMGWTFNZBS_APIKEY = check_setting_str(CFG, 'omgwtfnzbs', 'omgwtfnzbs_apikey', '') CheckSection(CFG, 'SABnzbd') SAB_USERNAME = check_setting_str(CFG, 'SABnzbd', 'sab_username', '') SAB_PASSWORD = 
check_setting_str(CFG, 'SABnzbd', 'sab_password', '') SAB_APIKEY = check_setting_str(CFG, 'SABnzbd', 'sab_apikey', '') SAB_CATEGORY = check_setting_str(CFG, 'SABnzbd', 'sab_category', 'tv') SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '') CheckSection(CFG, 'NZBget') NZBGET_USERNAME = check_setting_str(CFG, 'NZBget', 'nzbget_username', 'nzbget') NZBGET_PASSWORD = check_setting_str(CFG, 'NZBget', 'nzbget_password', 'tegbzn6789') NZBGET_CATEGORY = check_setting_str(CFG, 'NZBget', 'nzbget_category', 'tv') NZBGET_HOST = check_setting_str(CFG, 'NZBget', 'nzbget_host', '') CheckSection(CFG, 'XBMC') USE_XBMC = bool(check_setting_int(CFG, 'XBMC', 'use_xbmc', 0)) XBMC_ALWAYS_ON = bool(check_setting_int(CFG, 'XBMC', 'xbmc_always_on', 1)) XBMC_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'XBMC', 'xbmc_notify_onsnatch', 0)) XBMC_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'XBMC', 'xbmc_notify_ondownload', 0)) XBMC_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_library', 0)) XBMC_UPDATE_FULL = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_full', 0)) XBMC_UPDATE_ONLYFIRST = bool(check_setting_int(CFG, 'XBMC', 'xbmc_update_onlyfirst', 0)) XBMC_HOST = check_setting_str(CFG, 'XBMC', 'xbmc_host', '') XBMC_USERNAME = check_setting_str(CFG, 'XBMC', 'xbmc_username', '') XBMC_PASSWORD = check_setting_str(CFG, 'XBMC', 'xbmc_password', '') CheckSection(CFG, 'Plex') USE_PLEX = bool(check_setting_int(CFG, 'Plex', 'use_plex', 0)) PLEX_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Plex', 'plex_notify_onsnatch', 0)) PLEX_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Plex', 'plex_notify_ondownload', 0)) PLEX_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'Plex', 'plex_update_library', 0)) PLEX_SERVER_HOST = check_setting_str(CFG, 'Plex', 'plex_server_host', '') PLEX_HOST = check_setting_str(CFG, 'Plex', 'plex_host', '') PLEX_USERNAME = check_setting_str(CFG, 'Plex', 'plex_username', '') PLEX_PASSWORD = check_setting_str(CFG, 'Plex', 'plex_password', '') 
CheckSection(CFG, 'Growl') USE_GROWL = bool(check_setting_int(CFG, 'Growl', 'use_growl', 0)) GROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Growl', 'growl_notify_onsnatch', 0)) GROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Growl', 'growl_notify_ondownload', 0)) GROWL_HOST = check_setting_str(CFG, 'Growl', 'growl_host', '') GROWL_PASSWORD = check_setting_str(CFG, 'Growl', 'growl_password', '') CheckSection(CFG, 'Prowl') USE_PROWL = bool(check_setting_int(CFG, 'Prowl', 'use_prowl', 0)) PROWL_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_onsnatch', 0)) PROWL_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Prowl', 'prowl_notify_ondownload', 0)) PROWL_API = check_setting_str(CFG, 'Prowl', 'prowl_api', '') PROWL_PRIORITY = check_setting_int(CFG, 'Prowl', 'prowl_priority', 0) CheckSection(CFG, 'Twitter') USE_TWITTER = bool(check_setting_int(CFG, 'Twitter', 'use_twitter', 0)) TWITTER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_onsnatch', 0)) TWITTER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Twitter', 'twitter_notify_ondownload', 0)) TWITTER_USERNAME = check_setting_str(CFG, 'Twitter', 'twitter_username', '') TWITTER_PASSWORD = check_setting_str(CFG, 'Twitter', 'twitter_password', '') TWITTER_PREFIX = check_setting_str(CFG, 'Twitter', 'twitter_prefix', 'Sick Beard') CheckSection(CFG, 'Boxcar2') USE_BOXCAR2 = bool(check_setting_int(CFG, 'Boxcar2', 'use_boxcar2', 0)) BOXCAR2_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_onsnatch', 0)) BOXCAR2_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Boxcar2', 'boxcar2_notify_ondownload', 0)) BOXCAR2_ACCESS_TOKEN = check_setting_str(CFG, 'Boxcar2', 'boxcar2_access_token', '') BOXCAR2_SOUND = check_setting_str(CFG, 'Boxcar2', 'boxcar2_sound', 'default') CheckSection(CFG, 'Pushover') USE_PUSHOVER = bool(check_setting_int(CFG, 'Pushover', 'use_pushover', 0)) PUSHOVER_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushover', 
'pushover_notify_onsnatch', 0)) PUSHOVER_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushover', 'pushover_notify_ondownload', 0)) PUSHOVER_USERKEY = check_setting_str(CFG, 'Pushover', 'pushover_userkey', '') PUSHOVER_PRIORITY = check_setting_int(CFG, 'Pushover', 'pushover_priority', 0) PUSHOVER_DEVICE = check_setting_str(CFG, 'Pushover', 'pushover_device', 'all') PUSHOVER_SOUND = check_setting_str(CFG, 'Pushover', 'pushover_sound', 'pushover') CheckSection(CFG, 'Libnotify') USE_LIBNOTIFY = bool(check_setting_int(CFG, 'Libnotify', 'use_libnotify', 0)) LIBNOTIFY_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_onsnatch', 0)) LIBNOTIFY_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Libnotify', 'libnotify_notify_ondownload', 0)) CheckSection(CFG, 'NMJ') USE_NMJ = bool(check_setting_int(CFG, 'NMJ', 'use_nmj', 0)) NMJ_HOST = check_setting_str(CFG, 'NMJ', 'nmj_host', '') NMJ_DATABASE = check_setting_str(CFG, 'NMJ', 'nmj_database', '') NMJ_MOUNT = check_setting_str(CFG, 'NMJ', 'nmj_mount', '') CheckSection(CFG, 'NMJv2') USE_NMJv2 = bool(check_setting_int(CFG, 'NMJv2', 'use_nmjv2', 0)) NMJv2_HOST = check_setting_str(CFG, 'NMJv2', 'nmjv2_host', '') NMJv2_DATABASE = check_setting_str(CFG, 'NMJv2', 'nmjv2_database', '') NMJv2_DBLOC = check_setting_str(CFG, 'NMJv2', 'nmjv2_dbloc', '') CheckSection(CFG, 'Synology') USE_SYNOINDEX = bool(check_setting_int(CFG, 'Synology', 'use_synoindex', 0)) SYNOINDEX_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Synology', 'synoindex_notify_onsnatch', 0)) SYNOINDEX_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Synology', 'synoindex_notify_ondownload', 0)) SYNOINDEX_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'Synology', 'synoindex_update_library', 0)) CheckSection(CFG, 'Trakt') USE_TRAKT = bool(check_setting_int(CFG, 'Trakt', 'use_trakt', 0)) TRAKT_USERNAME = check_setting_str(CFG, 'Trakt', 'trakt_username', '') TRAKT_PASSWORD = check_setting_str(CFG, 'Trakt', 'trakt_password', '') TRAKT_API = 
check_setting_str(CFG, 'Trakt', 'trakt_api', '') CheckSection(CFG, 'pyTivo') USE_PYTIVO = bool(check_setting_int(CFG, 'pyTivo', 'use_pytivo', 0)) PYTIVO_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_onsnatch', 0)) PYTIVO_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'pyTivo', 'pytivo_notify_ondownload', 0)) PYTIVO_UPDATE_LIBRARY = bool(check_setting_int(CFG, 'pyTivo', 'pyTivo_update_library', 0)) PYTIVO_HOST = check_setting_str(CFG, 'pyTivo', 'pytivo_host', '') PYTIVO_SHARE_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_share_name', '') PYTIVO_TIVO_NAME = check_setting_str(CFG, 'pyTivo', 'pytivo_tivo_name', '') CheckSection(CFG, 'NMA') USE_NMA = bool(check_setting_int(CFG, 'NMA', 'use_nma', 0)) NMA_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'NMA', 'nma_notify_onsnatch', 0)) NMA_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'NMA', 'nma_notify_ondownload', 0)) NMA_API = check_setting_str(CFG, 'NMA', 'nma_api', '') NMA_PRIORITY = check_setting_int(CFG, 'NMA', 'nma_priority', 0) CheckSection(CFG, 'Pushalot') USE_PUSHALOT = bool(check_setting_int(CFG, 'Pushalot', 'use_pushalot', 0)) PUSHALOT_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_notify_onsnatch', 0)) PUSHALOT_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_notify_ondownload', 0)) PUSHALOT_AUTHORIZATIONTOKEN = check_setting_str(CFG, 'Pushalot', 'pushalot_authorizationtoken', '') PUSHALOT_SILENT = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_silent', 0)) PUSHALOT_IMPORTANT = bool(check_setting_int(CFG, 'Pushalot', 'pushalot_important', 0)) CheckSection(CFG, 'Pushbullet') USE_PUSHBULLET = bool(check_setting_int(CFG, 'Pushbullet', 'use_pushbullet', 0)) PUSHBULLET_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_onsnatch', 0)) PUSHBULLET_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Pushbullet', 'pushbullet_notify_ondownload', 0)) PUSHBULLET_ACCESS_TOKEN = check_setting_str(CFG, 'Pushbullet', 
'pushbullet_access_token', '') PUSHBULLET_DEVICE_IDEN = check_setting_str(CFG, 'Pushbullet', 'pushbullet_device_iden', '') CheckSection(CFG, 'Slack') USE_SLACK = bool(check_setting_int(CFG, 'Slack', 'use_slack', 0)) SLACK_NOTIFY_ONSNATCH = bool(check_setting_int(CFG, 'Slack', 'slack_notify_onsnatch', 0)) SLACK_NOTIFY_ONDOWNLOAD = bool(check_setting_int(CFG, 'Slack', 'slack_notify_ondownload', 0)) SLACK_ACCESS_TOKEN = check_setting_str(CFG, 'Slack', 'slack_access_token', '') SLACK_CHANNEL = check_setting_str(CFG, 'Slack', 'slack_channel', '') SLACK_BOT_NAME = check_setting_str(CFG, 'Slack', 'slack_bot_name', '') SLACK_ICON_URL = check_setting_str(CFG, 'Slack', 'slack_icon_url', '') if not os.path.isfile(CONFIG_FILE): logger.log(u"Unable to find '" + CONFIG_FILE + "', all settings will be default!", logger.DEBUG) save_config() # start up all the threads logger.sb_log_instance.initLogging(consoleLogging=consoleLogging) # initialize the main SB database db.upgradeDatabase(db.DBConnection(), mainDB.InitialSchema) # initialize the cache database db.upgradeDatabase(db.DBConnection("cache.db"), cache_db.InitialSchema) # fix up any db problems db.sanityCheckDatabase(db.DBConnection(), mainDB.MainSanityCheck) # migrate the config if it needs it migrator = ConfigMigrator(CFG) migrator.migrate_config() # initialize metadata_providers metadata_provider_dict = metadata.get_metadata_generator_dict() for cur_metadata_tuple in [(METADATA_XBMC, metadata.xbmc), (METADATA_XBMC_12PLUS, metadata.xbmc_12plus), (METADATA_MEDIABROWSER, metadata.mediabrowser), (METADATA_PS3, metadata.ps3), (METADATA_WDTV, metadata.wdtv), (METADATA_TIVO, metadata.tivo), (METADATA_MEDE8ER, metadata.mede8er) ]: (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple tmp_provider = cur_metadata_class.metadata_class() tmp_provider.set_config(cur_metadata_config) metadata_provider_dict[tmp_provider.name] = tmp_provider # initialize newznab providers newznabProviderList = 
providers.getNewznabProviderList(NEWZNAB_DATA) providerList = providers.makeProviderList() # initialize schedulers # updaters versionCheckScheduler = scheduler.Scheduler(versionChecker.CheckVersion(), cycleTime=datetime.timedelta(hours=12), threadName="CHECKVERSION" ) showQueueScheduler = scheduler.Scheduler(show_queue.ShowQueue(), cycleTime=datetime.timedelta(seconds=3), threadName="SHOWQUEUE", silent=True) showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(), cycleTime=datetime.timedelta(hours=1), threadName="SHOWUPDATER", start_time=datetime.time(hour=3) # 3 AM ) # searchers searchQueueScheduler = scheduler.Scheduler(search_queue.SearchQueue(), cycleTime=datetime.timedelta(seconds=3), threadName="SEARCHQUEUE", silent=True ) currentSearchScheduler = scheduler.Scheduler(searchCurrent.CurrentSearcher(), cycleTime=datetime.timedelta(minutes=SEARCH_FREQUENCY), threadName="SEARCH", run_delay=datetime.timedelta(minutes=1) ) backlogSearchScheduler = searchBacklog.BacklogSearchScheduler(searchBacklog.BacklogSearcher(), cycleTime=datetime.timedelta(minutes=get_backlog_cycle_time()), threadName="BACKLOG", run_delay=datetime.timedelta(minutes=7) ) backlogSearchScheduler.action.cycleTime = BACKLOG_SEARCH_FREQUENCY properFinderScheduler = scheduler.Scheduler(properFinder.ProperFinder(), cycleTime=datetime.timedelta(hours=1), threadName="FINDPROPERS", start_time=datetime.time(hour=1) # 1 AM ) # processors autoPostProcesserScheduler = scheduler.Scheduler(autoPostProcesser.PostProcesser(), cycleTime=datetime.timedelta(minutes=POSTPROCESS_FREQUENCY), threadName="POSTPROCESSER", run_delay=datetime.timedelta(minutes=5) ) if not PROCESS_AUTOMATICALLY: autoPostProcesserScheduler.silent = True showList = [] loadingShowList = {} __INITIALIZED__ = True return True def start(): global __INITIALIZED__, currentSearchScheduler, backlogSearchScheduler, \ showUpdateScheduler, versionCheckScheduler, showQueueScheduler, \ properFinderScheduler, autoPostProcesserScheduler, 
searchQueueScheduler, \ started with INIT_LOCK: if __INITIALIZED__: # start the search scheduler currentSearchScheduler.thread.start() # start the backlog scheduler backlogSearchScheduler.thread.start() # start the show updater showUpdateScheduler.thread.start() # start the version checker versionCheckScheduler.thread.start() # start the queue checker showQueueScheduler.thread.start() # start the search queue checker searchQueueScheduler.thread.start() # start the queue checker properFinderScheduler.thread.start() # start the proper finder autoPostProcesserScheduler.thread.start() started = True def halt(): global __INITIALIZED__, currentSearchScheduler, backlogSearchScheduler, showUpdateScheduler, \ showQueueScheduler, properFinderScheduler, autoPostProcesserScheduler, searchQueueScheduler, \ started with INIT_LOCK: if __INITIALIZED__: logger.log(u"Aborting all threads") # abort all the threads currentSearchScheduler.abort = True logger.log(u"Waiting for the SEARCH thread to exit") try: currentSearchScheduler.thread.join(10) except: pass backlogSearchScheduler.abort = True logger.log(u"Waiting for the BACKLOG thread to exit") try: backlogSearchScheduler.thread.join(10) except: pass showUpdateScheduler.abort = True logger.log(u"Waiting for the SHOWUPDATER thread to exit") try: showUpdateScheduler.thread.join(10) except: pass versionCheckScheduler.abort = True logger.log(u"Waiting for the VERSIONCHECKER thread to exit") try: versionCheckScheduler.thread.join(10) except: pass showQueueScheduler.abort = True logger.log(u"Waiting for the SHOWQUEUE thread to exit") try: showQueueScheduler.thread.join(10) except: pass searchQueueScheduler.abort = True logger.log(u"Waiting for the SEARCHQUEUE thread to exit") try: searchQueueScheduler.thread.join(10) except: pass autoPostProcesserScheduler.abort = True logger.log(u"Waiting for the POSTPROCESSER thread to exit") try: autoPostProcesserScheduler.thread.join(10) except: pass properFinderScheduler.abort = True 
logger.log(u"Waiting for the PROPERFINDER thread to exit") try: properFinderScheduler.thread.join(10) except: pass __INITIALIZED__ = False def remove_pid_file(PIDFILE): try: if os.path.exists(PIDFILE): os.remove(PIDFILE) except (IOError, OSError): return False return True def sig_handler(signum=None, frame=None): if type(signum) != type(None): logger.log(u"Signal %i caught, saving and exiting..." % int(signum)) saveAndShutdown() def saveAll(): global showList # write all shows logger.log(u"Saving all shows to the database") for show in showList: show.saveToDB() # save config logger.log(u"Saving config file to disk") save_config() def saveAndShutdown(restart=False): halt() saveAll() logger.log(u"Killing cherrypy") cherrypy.engine.exit() if CREATEPID: logger.log(u"Removing pidfile " + str(PIDFILE)) remove_pid_file(PIDFILE) if restart: install_type = versionCheckScheduler.action.install_type popen_list = [] if install_type in ('git', 'source'): popen_list = [sys.executable, MY_FULLNAME] elif install_type == 'win': if hasattr(sys, 'frozen'): # c:\dir\to\updater.exe 12345 c:\dir\to\sickbeard.exe popen_list = [os.path.join(PROG_DIR, 'updater.exe'), str(PID), sys.executable] else: logger.log(u"Unknown SB launch method, please file a bug report about this", logger.ERROR) popen_list = [sys.executable, os.path.join(PROG_DIR, 'updater.py'), str(PID), sys.executable, MY_FULLNAME ] if popen_list: popen_list += MY_ARGS if '--nolaunch' not in popen_list: popen_list += ['--nolaunch'] logger.log(u"Restarting Sick Beard with " + str(popen_list)) logger.close() subprocess.Popen(popen_list, cwd=os.getcwd()) os._exit(0) def invoke_command(to_call, *args, **kwargs): global invoked_command def delegate(): to_call(*args, **kwargs) invoked_command = delegate logger.log(u"Placed invoked command: " + repr(invoked_command) + " for " + repr(to_call) + " with " + repr(args) + " and " + repr(kwargs), logger.DEBUG) def invoke_restart(soft=True): invoke_command(restart, soft=soft) def 
invoke_shutdown(): invoke_command(saveAndShutdown) def restart(soft=True): if soft: halt() saveAll() #logger.log(u"Restarting cherrypy") #cherrypy.engine.restart() logger.log(u"Re-initializing all data") initialize() else: saveAndShutdown(restart=True) def save_config(): new_config = ConfigObj() new_config.filename = CONFIG_FILE new_config['General'] = {} new_config['General']['config_version'] = CONFIG_VERSION new_config['General']['log_dir'] = ACTUAL_LOG_DIR if ACTUAL_LOG_DIR else 'Logs' new_config['General']['web_port'] = WEB_PORT new_config['General']['web_host'] = WEB_HOST new_config['General']['web_ipv6'] = int(WEB_IPV6) new_config['General']['web_log'] = int(WEB_LOG) new_config['General']['web_root'] = WEB_ROOT new_config['General']['web_username'] = WEB_USERNAME new_config['General']['web_password'] = WEB_PASSWORD new_config['General']['anon_redirect'] = ANON_REDIRECT new_config['General']['display_all_seasons'] = DISPLAY_ALL_SEASONS new_config['General']['use_api'] = int(USE_API) new_config['General']['api_key'] = API_KEY new_config['General']['enable_https'] = int(ENABLE_HTTPS) new_config['General']['https_cert'] = HTTPS_CERT new_config['General']['https_key'] = HTTPS_KEY new_config['General']['use_nzbs'] = int(USE_NZBS) new_config['General']['use_torrents'] = int(USE_TORRENTS) new_config['General']['nzb_method'] = NZB_METHOD new_config['General']['usenet_retention'] = int(USENET_RETENTION) new_config['General']['search_frequency'] = int(SEARCH_FREQUENCY) new_config['General']['postprocess_frequency'] = int(POSTPROCESS_FREQUENCY) new_config['General']['download_propers'] = int(DOWNLOAD_PROPERS) new_config['General']['quality_default'] = int(QUALITY_DEFAULT) new_config['General']['status_default'] = int(STATUS_DEFAULT) new_config['General']['flatten_folders_default'] = int(FLATTEN_FOLDERS_DEFAULT) new_config['General']['provider_order'] = ' '.join(PROVIDER_ORDER) new_config['General']['version_notify'] = int(VERSION_NOTIFY) 
new_config['General']['naming_pattern'] = NAMING_PATTERN new_config['General']['naming_custom_abd'] = int(NAMING_CUSTOM_ABD) new_config['General']['naming_abd_pattern'] = NAMING_ABD_PATTERN new_config['General']['naming_multi_ep'] = int(NAMING_MULTI_EP) new_config['General']['launch_browser'] = int(LAUNCH_BROWSER) new_config['General']['use_listview'] = int(USE_LISTVIEW) new_config['General']['metadata_xbmc'] = METADATA_XBMC new_config['General']['metadata_xbmc_12plus'] = METADATA_XBMC_12PLUS new_config['General']['metadata_mediabrowser'] = METADATA_MEDIABROWSER new_config['General']['metadata_ps3'] = METADATA_PS3 new_config['General']['metadata_wdtv'] = METADATA_WDTV new_config['General']['metadata_tivo'] = METADATA_TIVO new_config['General']['metadata_mede8er'] = METADATA_MEDE8ER new_config['General']['cache_dir'] = ACTUAL_CACHE_DIR if ACTUAL_CACHE_DIR else 'cache' new_config['General']['root_dirs'] = ROOT_DIRS if ROOT_DIRS else '' new_config['General']['tv_download_dir'] = TV_DOWNLOAD_DIR new_config['General']['keep_processed_dir'] = int(KEEP_PROCESSED_DIR) new_config['General']['move_associated_files'] = int(MOVE_ASSOCIATED_FILES) new_config['General']['filter_associated_files'] = FILTER_ASSOCIATED_FILES new_config['General']['process_automatically'] = int(PROCESS_AUTOMATICALLY) new_config['General']['rename_episodes'] = int(RENAME_EPISODES) new_config['General']['create_missing_show_dirs'] = int(CREATE_MISSING_SHOW_DIRS) new_config['General']['add_shows_wo_dir'] = int(ADD_SHOWS_WO_DIR) new_config['General']['extra_scripts'] = '|'.join(EXTRA_SCRIPTS) new_config['General']['git_path'] = GIT_PATH new_config['General']['ignore_words'] = IGNORE_WORDS new_config['Blackhole'] = {} new_config['Blackhole']['nzb_dir'] = NZB_DIR new_config['Blackhole']['torrent_dir'] = TORRENT_DIR new_config['EZRSS'] = {} new_config['EZRSS']['ezrss'] = int(EZRSS) new_config['HDBITS'] = {} new_config['HDBITS']['hdbits'] = int(HDBITS) new_config['HDBITS']['hdbits_username'] = 
HDBITS_USERNAME new_config['HDBITS']['hdbits_passkey'] = HDBITS_PASSKEY new_config['TVTORRENTS'] = {} new_config['TVTORRENTS']['tvtorrents'] = int(TVTORRENTS) new_config['TVTORRENTS']['tvtorrents_digest'] = TVTORRENTS_DIGEST new_config['TVTORRENTS']['tvtorrents_hash'] = TVTORRENTS_HASH new_config['BTN'] = {} new_config['BTN']['btn'] = int(BTN) new_config['BTN']['btn_api_key'] = BTN_API_KEY new_config['TorrentLeech'] = {} new_config['TorrentLeech']['torrentleech'] = int(TORRENTLEECH) new_config['TorrentLeech']['torrentleech_key'] = TORRENTLEECH_KEY new_config['NZBs'] = {} new_config['NZBs']['nzbs'] = int(NZBS) new_config['NZBs']['nzbs_uid'] = NZBS_UID new_config['NZBs']['nzbs_hash'] = NZBS_HASH new_config['Womble'] = {} new_config['Womble']['womble'] = int(WOMBLE) new_config['omgwtfnzbs'] = {} new_config['omgwtfnzbs']['omgwtfnzbs'] = int(OMGWTFNZBS) new_config['omgwtfnzbs']['omgwtfnzbs_username'] = OMGWTFNZBS_USERNAME new_config['omgwtfnzbs']['omgwtfnzbs_apikey'] = OMGWTFNZBS_APIKEY new_config['SABnzbd'] = {} new_config['SABnzbd']['sab_username'] = SAB_USERNAME new_config['SABnzbd']['sab_password'] = SAB_PASSWORD new_config['SABnzbd']['sab_apikey'] = SAB_APIKEY new_config['SABnzbd']['sab_category'] = SAB_CATEGORY new_config['SABnzbd']['sab_host'] = SAB_HOST new_config['NZBget'] = {} new_config['NZBget']['nzbget_username'] = NZBGET_USERNAME new_config['NZBget']['nzbget_password'] = NZBGET_PASSWORD new_config['NZBget']['nzbget_category'] = NZBGET_CATEGORY new_config['NZBget']['nzbget_host'] = NZBGET_HOST new_config['XBMC'] = {} new_config['XBMC']['use_xbmc'] = int(USE_XBMC) new_config['XBMC']['xbmc_always_on'] = int(XBMC_ALWAYS_ON) new_config['XBMC']['xbmc_notify_onsnatch'] = int(XBMC_NOTIFY_ONSNATCH) new_config['XBMC']['xbmc_notify_ondownload'] = int(XBMC_NOTIFY_ONDOWNLOAD) new_config['XBMC']['xbmc_update_library'] = int(XBMC_UPDATE_LIBRARY) new_config['XBMC']['xbmc_update_full'] = int(XBMC_UPDATE_FULL) new_config['XBMC']['xbmc_update_onlyfirst'] = 
int(XBMC_UPDATE_ONLYFIRST) new_config['XBMC']['xbmc_host'] = XBMC_HOST new_config['XBMC']['xbmc_username'] = XBMC_USERNAME new_config['XBMC']['xbmc_password'] = XBMC_PASSWORD new_config['Plex'] = {} new_config['Plex']['use_plex'] = int(USE_PLEX) new_config['Plex']['plex_notify_onsnatch'] = int(PLEX_NOTIFY_ONSNATCH) new_config['Plex']['plex_notify_ondownload'] = int(PLEX_NOTIFY_ONDOWNLOAD) new_config['Plex']['plex_update_library'] = int(PLEX_UPDATE_LIBRARY) new_config['Plex']['plex_server_host'] = PLEX_SERVER_HOST new_config['Plex']['plex_host'] = PLEX_HOST new_config['Plex']['plex_username'] = PLEX_USERNAME new_config['Plex']['plex_password'] = PLEX_PASSWORD new_config['Growl'] = {} new_config['Growl']['use_growl'] = int(USE_GROWL) new_config['Growl']['growl_notify_onsnatch'] = int(GROWL_NOTIFY_ONSNATCH) new_config['Growl']['growl_notify_ondownload'] = int(GROWL_NOTIFY_ONDOWNLOAD) new_config['Growl']['growl_host'] = GROWL_HOST new_config['Growl']['growl_password'] = GROWL_PASSWORD new_config['Prowl'] = {} new_config['Prowl']['use_prowl'] = int(USE_PROWL) new_config['Prowl']['prowl_notify_onsnatch'] = int(PROWL_NOTIFY_ONSNATCH) new_config['Prowl']['prowl_notify_ondownload'] = int(PROWL_NOTIFY_ONDOWNLOAD) new_config['Prowl']['prowl_api'] = PROWL_API new_config['Prowl']['prowl_priority'] = int(PROWL_PRIORITY) new_config['Twitter'] = {} new_config['Twitter']['use_twitter'] = int(USE_TWITTER) new_config['Twitter']['twitter_notify_onsnatch'] = int(TWITTER_NOTIFY_ONSNATCH) new_config['Twitter']['twitter_notify_ondownload'] = int(TWITTER_NOTIFY_ONDOWNLOAD) new_config['Twitter']['twitter_username'] = TWITTER_USERNAME new_config['Twitter']['twitter_password'] = TWITTER_PASSWORD new_config['Twitter']['twitter_prefix'] = TWITTER_PREFIX new_config['Boxcar2'] = {} new_config['Boxcar2']['use_boxcar2'] = int(USE_BOXCAR2) new_config['Boxcar2']['boxcar2_notify_onsnatch'] = int(BOXCAR2_NOTIFY_ONSNATCH) new_config['Boxcar2']['boxcar2_notify_ondownload'] = 
int(BOXCAR2_NOTIFY_ONDOWNLOAD) new_config['Boxcar2']['boxcar2_access_token'] = BOXCAR2_ACCESS_TOKEN new_config['Boxcar2']['boxcar2_sound'] = BOXCAR2_SOUND new_config['Pushover'] = {} new_config['Pushover']['use_pushover'] = int(USE_PUSHOVER) new_config['Pushover']['pushover_notify_onsnatch'] = int(PUSHOVER_NOTIFY_ONSNATCH) new_config['Pushover']['pushover_notify_ondownload'] = int(PUSHOVER_NOTIFY_ONDOWNLOAD) new_config['Pushover']['pushover_userkey'] = PUSHOVER_USERKEY new_config['Pushover']['pushover_priority'] = int(PUSHOVER_PRIORITY) new_config['Pushover']['pushover_device'] = PUSHOVER_DEVICE new_config['Pushover']['pushover_sound'] = PUSHOVER_SOUND new_config['Libnotify'] = {} new_config['Libnotify']['use_libnotify'] = int(USE_LIBNOTIFY) new_config['Libnotify']['libnotify_notify_onsnatch'] = int(LIBNOTIFY_NOTIFY_ONSNATCH) new_config['Libnotify']['libnotify_notify_ondownload'] = int(LIBNOTIFY_NOTIFY_ONDOWNLOAD) new_config['NMJ'] = {} new_config['NMJ']['use_nmj'] = int(USE_NMJ) new_config['NMJ']['nmj_host'] = NMJ_HOST new_config['NMJ']['nmj_database'] = NMJ_DATABASE new_config['NMJ']['nmj_mount'] = NMJ_MOUNT new_config['Synology'] = {} new_config['Synology']['use_synoindex'] = int(USE_SYNOINDEX) new_config['Synology']['synoindex_notify_onsnatch'] = int(SYNOINDEX_NOTIFY_ONSNATCH) new_config['Synology']['synoindex_notify_ondownload'] = int(SYNOINDEX_NOTIFY_ONDOWNLOAD) new_config['Synology']['synoindex_update_library'] = int(SYNOINDEX_UPDATE_LIBRARY) new_config['NMJv2'] = {} new_config['NMJv2']['use_nmjv2'] = int(USE_NMJv2) new_config['NMJv2']['nmjv2_host'] = NMJv2_HOST new_config['NMJv2']['nmjv2_database'] = NMJv2_DATABASE new_config['NMJv2']['nmjv2_dbloc'] = NMJv2_DBLOC new_config['Trakt'] = {} new_config['Trakt']['use_trakt'] = int(USE_TRAKT) new_config['Trakt']['trakt_username'] = TRAKT_USERNAME new_config['Trakt']['trakt_password'] = TRAKT_PASSWORD new_config['Trakt']['trakt_api'] = TRAKT_API new_config['pyTivo'] = {} new_config['pyTivo']['use_pytivo'] = 
int(USE_PYTIVO) new_config['pyTivo']['pytivo_notify_onsnatch'] = int(PYTIVO_NOTIFY_ONSNATCH) new_config['pyTivo']['pytivo_notify_ondownload'] = int(PYTIVO_NOTIFY_ONDOWNLOAD) new_config['pyTivo']['pytivo_update_library'] = int(PYTIVO_UPDATE_LIBRARY) new_config['pyTivo']['pytivo_host'] = PYTIVO_HOST new_config['pyTivo']['pytivo_share_name'] = PYTIVO_SHARE_NAME new_config['pyTivo']['pytivo_tivo_name'] = PYTIVO_TIVO_NAME new_config['NMA'] = {} new_config['NMA']['use_nma'] = int(USE_NMA) new_config['NMA']['nma_notify_onsnatch'] = int(NMA_NOTIFY_ONSNATCH) new_config['NMA']['nma_notify_ondownload'] = int(NMA_NOTIFY_ONDOWNLOAD) new_config['NMA']['nma_api'] = NMA_API new_config['NMA']['nma_priority'] = int(NMA_PRIORITY) new_config['Pushalot'] = {} new_config['Pushalot']['use_pushalot'] = int(USE_PUSHALOT) new_config['Pushalot']['pushalot_notify_onsnatch'] = int(PUSHALOT_NOTIFY_ONSNATCH) new_config['Pushalot']['pushalot_notify_ondownload'] = int(PUSHALOT_NOTIFY_ONDOWNLOAD) new_config['Pushalot']['pushalot_authorizationtoken'] = PUSHALOT_AUTHORIZATIONTOKEN new_config['Pushalot']['pushalot_silent'] = int(PUSHALOT_SILENT) new_config['Pushalot']['pushalot_important'] = int(PUSHALOT_IMPORTANT) new_config['Pushbullet'] = {} new_config['Pushbullet']['use_pushbullet'] = int(USE_PUSHBULLET) new_config['Pushbullet']['pushbullet_notify_onsnatch'] = int(PUSHBULLET_NOTIFY_ONSNATCH) new_config['Pushbullet']['pushbullet_notify_ondownload'] = int(PUSHBULLET_NOTIFY_ONDOWNLOAD) new_config['Pushbullet']['pushbullet_access_token'] = PUSHBULLET_ACCESS_TOKEN new_config['Pushbullet']['pushbullet_device_iden'] = PUSHBULLET_DEVICE_IDEN new_config['Slack'] = {} new_config['Slack']['use_slack'] = int(USE_SLACK) new_config['Slack']['slack_notify_onsnatch'] = int(SLACK_NOTIFY_ONSNATCH) new_config['Slack']['slack_notify_ondownload'] = int(SLACK_NOTIFY_ONDOWNLOAD) new_config['Slack']['slack_access_token'] = SLACK_ACCESS_TOKEN new_config['Slack']['slack_channel'] = SLACK_CHANNEL 
new_config['Slack']['slack_bot_name'] = SLACK_BOT_NAME new_config['Slack']['slack_icon_url'] = SLACK_ICON_URL new_config['Newznab'] = {} new_config['Newznab']['newznab_data'] = NEWZNAB_DATA new_config['GUI'] = {} new_config['GUI']['coming_eps_layout'] = COMING_EPS_LAYOUT new_config['GUI']['coming_eps_display_paused'] = int(COMING_EPS_DISPLAY_PAUSED) new_config['GUI']['coming_eps_sort'] = COMING_EPS_SORT new_config.write() def launchBrowser(startPort=None): if not startPort: startPort = WEB_PORT if ENABLE_HTTPS: browserURL = 'https://localhost:%d%s' % (startPort, WEB_ROOT) else: browserURL = 'http://localhost:%d%s' % (startPort, WEB_ROOT) try: webbrowser.open(browserURL, 2, 1) except: try: webbrowser.open(browserURL, 1, 1) except: logger.log(u"Unable to launch a browser", logger.ERROR) def getEpList(epIDs, showid=None): if epIDs is None or len(epIDs) == 0: return [] query = "SELECT * FROM tv_episodes WHERE tvdbid in (%s)" % (",".join(['?'] * len(epIDs)),) params = epIDs if showid is not None: query += " AND showid = ?" params.append(showid) myDB = db.DBConnection() sqlResults = myDB.select(query, params) epList = [] for curEp in sqlResults: curShowObj = helpers.findCertainShow(showList, int(curEp["showid"])) curEpObj = curShowObj.getEpisode(int(curEp["season"]), int(curEp["episode"])) epList.append(curEpObj) return epList
57,000
Python
.py
1,042
45.005758
205
0.640165
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,320
scheduler.py
midgetspy_Sick-Beard/sickbeard/scheduler.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import time import threading import traceback from sickbeard import logger from sickbeard.exceptions import ex class Scheduler: def __init__(self, action, cycleTime=datetime.timedelta(minutes=10), run_delay=datetime.timedelta(minutes=0), start_time=None, threadName="ScheduledThread", silent=False): self.lastRun = datetime.datetime.now() + run_delay - cycleTime self.action = action self.cycleTime = cycleTime self.start_time = start_time self.thread = None self.threadName = threadName self.silent = silent self.initThread() self.abort = False def initThread(self): if self.thread == None or not self.thread.isAlive(): self.thread = threading.Thread(None, self.runAction, self.threadName) def timeLeft(self): return self.cycleTime - (datetime.datetime.now() - self.lastRun) def forceRun(self): if not self.action.amActive: self.lastRun = datetime.datetime.fromordinal(1) return True return False def runAction(self): while True: current_time = datetime.datetime.now() should_run = False # check if interval has passed if current_time - self.lastRun >= self.cycleTime: # check if wanting to start around certain time taking interval into account if self.start_time: hour_diff = current_time.time().hour - self.start_time.hour if 
hour_diff >= 0 and hour_diff < self.cycleTime.seconds / 3600: should_run = True else: # set lastRun to only check start_time after another cycleTime self.lastRun = current_time else: should_run = True if should_run: self.lastRun = current_time try: if not self.silent: logger.log(u"Starting new thread: " + self.threadName, logger.DEBUG) self.action.run() except Exception, e: logger.log(u"Exception generated in thread " + self.threadName + ": " + ex(e), logger.ERROR) logger.log(repr(traceback.format_exc()), logger.DEBUG) if self.abort: self.abort = False self.thread = None return time.sleep(1)
3,360
Python
.py
74
34.054054
176
0.609982
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,321
webapi.py
midgetspy_Sick-Beard/sickbeard/webapi.py
# Author: Dennis Lutter <[email protected]> # Author: Jonathon Saine <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import time import urllib import datetime import threading import re import traceback import cherrypy import sickbeard import webserve from sickbeard import db, logger, exceptions, history, ui, helpers from sickbeard.exceptions import ex from sickbeard import encodingKludge as ek from sickbeard import search_queue, processTV from sickbeard.common import SNATCHED, SNATCHED_PROPER, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN from common import Quality, qualityPresetStrings, statusStrings from sickbeard import image_cache from lib.tvdb_api import tvdb_api, tvdb_exceptions try: import json except ImportError: from lib import simplejson as json try: import xml.etree.cElementTree as etree except ImportError: import xml.etree.ElementTree as etree dateFormat = "%Y-%m-%d" dateTimeFormat = "%Y-%m-%d %H:%M" RESULT_SUCCESS = 10 # only use inside the run methods RESULT_FAILURE = 20 # only use inside the run methods RESULT_TIMEOUT = 30 # not used yet :( RESULT_ERROR = 40 # only use outside of the run methods ! RESULT_FATAL = 50 # only use in Api.default() ! 
this is the "we encountered an internal error" error RESULT_DENIED = 60 # only use in Api.default() ! this is the access denied error result_type_map = {RESULT_SUCCESS: "success", RESULT_FAILURE: "failure", RESULT_TIMEOUT: "timeout", RESULT_ERROR: "error", RESULT_FATAL: "fatal", RESULT_DENIED: "denied", } # basically everything except RESULT_SUCCESS / success is bad class Api: """ api class that returns json results """ version = 4 # use an int since float-point is unpredictable intent = 4 @cherrypy.expose def default(self, *args, **kwargs): self.apiKey = sickbeard.API_KEY access, accessMsg, args, kwargs = self._grand_access(self.apiKey, args, kwargs) # set the output callback # default json outputCallbackDict = {'default': self._out_as_json, 'image': lambda x: x['image'], } # do we have access ? if access: logger.log(accessMsg, logger.DEBUG) else: logger.log(accessMsg, logger.WARNING) return outputCallbackDict['default'](_responds(RESULT_DENIED, msg=accessMsg)) # set the original call_dispatcher as the local _call_dispatcher _call_dispatcher = call_dispatcher # if profile was set wrap "_call_dispatcher" in the profile function if 'profile' in kwargs: from lib.profilehooks import profile _call_dispatcher = profile(_call_dispatcher, immediate=True) del kwargs["profile"] # if debug was set call the "_call_dispatcher" if 'debug' in kwargs: # this way we can debug the cherry.py traceback in the browser outDict = _call_dispatcher(args, kwargs) del kwargs["debug"] # if debug was not set we wrap the "call_dispatcher" in a try block to assure a json output else: try: outDict = _call_dispatcher(args, kwargs) # seems like cherrypy uses exceptions for redirecting apparently this can happen when requesting images but it is ok so lets re raise it except cherrypy.HTTPRedirect: raise # real internal error oohhh nooo :( except Exception, e: logger.log(u"API :: " + ex(e), logger.ERROR) errorData = {"error_msg": ex(e), "args": args, "kwargs": kwargs} outDict = 
_responds(RESULT_FATAL, errorData, "SickBeard encountered an internal error! Please report to the Devs") if 'outputType' in outDict: outputCallback = outputCallbackDict[outDict['outputType']] else: outputCallback = outputCallbackDict['default'] return outputCallback(outDict) @cherrypy.expose def builder(self): """ expose the api-builder template """ t = webserve.PageTemplate(file="apiBuilder.tmpl") def titler(x): if not x: return x if not x.lower().startswith('a to ') and x.lower().startswith('a '): x = x[2:] elif x.lower().startswith('an '): x = x[3:] elif x.lower().startswith('the '): x = x[4:] return x # enforce a 100 show limit to ensure performance t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))[0:100] myDB = db.DBConnection(row_type="dict") seasonSQLResults = {} episodeSQLResults = {} for curShow in t.sortedShowList: seasonSQLResults[curShow.tvdbid] = myDB.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC", [curShow.tvdbid]) for curShow in t.sortedShowList: episodeSQLResults[curShow.tvdbid] = myDB.select("SELECT DISTINCT season,episode FROM tv_episodes WHERE showid = ? 
ORDER BY season DESC, episode DESC", [curShow.tvdbid]) t.seasonSQLResults = seasonSQLResults t.episodeSQLResults = episodeSQLResults myDB.connection.close() if len(sickbeard.API_KEY) == 32: t.apikey = sickbeard.API_KEY else: t.apikey = "api key not generated" return webserve._munge(t) def _out_as_json(self, dict): """ set cherrypy response to json """ response = cherrypy.response request = cherrypy.request response.headers['Content-Type'] = 'application/json;charset=UTF-8' try: out = json.dumps(dict, indent=self.intent, sort_keys=True) callback = request.params.get('callback') or request.params.get('jsonp') if callback is not None: # wrap with JSONP call if requested out = callback + '(' + out + ');' # if we fail to generate the output fake an error except Exception, e: logger.log(u"API :: " + traceback.format_exc(), logger.DEBUG) out = '{"result":"' + result_type_map[RESULT_ERROR] + '", "message": "error while composing output: "' + ex(e) + '"}' return out def _grand_access(self, realKey, args, kwargs): """ validate api key and log result """ remoteIp = cherrypy.request.remote.ip apiKey = kwargs.get("apikey", None) if not apiKey: # if we have keyless vars we assume first one is the api key, always ! if args: apiKey = args[0] # remove the apikey from the args tuple args = args[1:] else: del kwargs["apikey"] if sickbeard.USE_API is not True: msg = u"API :: " + remoteIp + " - SB API Disabled. ACCESS DENIED" return False, msg, args, kwargs elif apiKey == realKey: msg = u"API :: " + remoteIp + " - gave correct API KEY. ACCESS GRANTED" return True, msg, args, kwargs elif not apiKey: msg = u"API :: " + remoteIp + " - gave NO API KEY. ACCESS DENIED" return False, msg, args, kwargs else: msg = u"API :: " + remoteIp + " - gave WRONG API KEY " + apiKey + ". 
ACCESS DENIED" return False, msg, args, kwargs def call_dispatcher(args, kwargs): """ calls the appropriate CMD class looks for a cmd in args and kwargs or calls the TVDBShorthandWrapper when the first args element is a number or returns an error that there is no such cmd """ logger.log(u"API :: all args: '" + str(args) + "'", logger.DEBUG) logger.log(u"API :: all kwargs: '" + str(kwargs) + "'", logger.DEBUG) cmds = None if args: cmds = args[0] args = args[1:] if "cmd" in kwargs: cmds = kwargs["cmd"] del kwargs["cmd"] outDict = {} if cmds is not None: cmds = cmds.split("|") multiCmds = bool(len(cmds) > 1) for cmd in cmds: curArgs, curKwargs = filter_params(cmd, args, kwargs) cmdIndex = None # was a index used for this cmd ? if len(cmd.split("_")) > 1: # this gives us the clear cmd and the index cmd, cmdIndex = cmd.split("_") logger.log(u"API :: " + cmd + ": curKwargs " + str(curKwargs), logger.DEBUG) # skip these cmd while chaining if not (multiCmds and cmd in ('show.getposter', 'show.getbanner')): try: if cmd in _functionMaper: # get the cmd class, init it and run() curOutDict = _functionMaper.get(cmd)(curArgs, curKwargs).run() elif _is_int(cmd): curOutDict = TVDBShorthandWrapper(curArgs, curKwargs, cmd).run() else: curOutDict = _responds(RESULT_ERROR, "No such cmd: '" + cmd + "'") # Api errors that we raised, they are harmless except ApiError, e: curOutDict = _responds(RESULT_ERROR, msg=ex(e)) # if someone chained one of the forbidden cmds they will get an error for this one cmd else: curOutDict = _responds(RESULT_ERROR, msg="The cmd '" + cmd + "' is not supported while chaining") if multiCmds: # note: if multiple same cmds are issued but one has not an index defined it will override all others # or the other way around, this depends on the order of the cmds this is not a bug # do we need a index dict for this cmd ? 
if cmdIndex is None: outDict[cmd] = curOutDict else: if not cmd in outDict: outDict[cmd] = {} outDict[cmd][cmdIndex] = curOutDict else: outDict = curOutDict # if we had multiple cmds we have to wrap it in a response dict if multiCmds: outDict = _responds(RESULT_SUCCESS, outDict) # index / no cmd given else: outDict = CMD_SickBeard(args, kwargs).run() return outDict def filter_params(cmd, args, kwargs): """ return only params kwargs that are for cmd and rename them to a clean version (remove "<cmd>_") args are shared across all cmds all args and kwarks are lowerd cmd are separated by "|" e.g. &cmd=shows|future kwargs are namespaced with "." e.g. show.tvdbid=101501 if a karg has no namespace asing it anyways (global) full e.g. /api?apikey=1234&cmd=show.seasonlist_asd|show.seasonlist_2&show.seasonlist_asd.tvdbid=101501&show.seasonlist_2.tvdbid=79488&sort=asc two calls of show.seasonlist one has the index "asd" the other one "2" the "tvdbid" kwargs / params have the indexed cmd as a namspace and the kwarg / param "sort" is a used as a global """ curArgs = [] for arg in args: curArgs.append(arg.lower()) curArgs = tuple(curArgs) curKwargs = {} for kwarg in kwargs: if kwarg.find(cmd + ".") == 0: cleanKey = kwarg.rpartition(".")[2] curKwargs[cleanKey] = kwargs[kwarg].lower() # the kwarg was not namespaced therefore a "global" elif not "." in kwarg: curKwargs[kwarg] = kwargs[kwarg] return curArgs, curKwargs class ApiCall(object): _help = {"desc": "No help message available. 
Please tell the devs that a help msg is missing for this cmd"} def __init__(self, args, kwargs): # missing try: if self._missing: self.run = self.return_missing except AttributeError: pass # help if 'help' in kwargs: self.run = self.return_help def run(self): # override with real output function in subclass return {} def return_help(self): try: if self._requiredParams: pass except AttributeError: self._requiredParams = [] try: if self._optionalParams: pass except AttributeError: self._optionalParams = [] for paramDict, type in [(self._requiredParams, "requiredParameters"), (self._optionalParams, "optionalParameters")]: if type in self._help: for paramName in paramDict: if not paramName in self._help[type]: self._help[type][paramName] = {} if paramDict[paramName]["allowedValues"]: self._help[type][paramName]["allowedValues"] = paramDict[paramName]["allowedValues"] else: self._help[type][paramName]["allowedValues"] = "see desc" self._help[type][paramName]["defaultValue"] = paramDict[paramName]["defaultValue"] elif paramDict: for paramName in paramDict: self._help[type] = {} self._help[type][paramName] = paramDict[paramName] else: self._help[type] = {} msg = "No description available" if "desc" in self._help: msg = self._help["desc"] del self._help["desc"] return _responds(RESULT_SUCCESS, self._help, msg) def return_missing(self): if len(self._missing) == 1: msg = "The required parameter: '" + self._missing[0] + "' was not set" else: msg = "The required parameters: '" + "','".join(self._missing) + "' where not set" return _responds(RESULT_ERROR, msg=msg) def check_params(self, args, kwargs, key, default, required, type, allowedValues): """ function to check passed params for the shorthand wrapper and to detect missing/required param """ missing = True orgDefault = default if type == "bool": allowedValues = [0, 1] if args: default = args[0] missing = False args = args[1:] if kwargs.get(key): default = kwargs.get(key) missing = False if required: try: self._missing 
self._requiredParams.append(key) except AttributeError: self._missing = [] self._requiredParams = {} self._requiredParams[key] = {"allowedValues": allowedValues, "defaultValue": orgDefault} if missing and key not in self._missing: self._missing.append(key) else: try: self._optionalParams[key] = {"allowedValues": allowedValues, "defaultValue": orgDefault} except AttributeError: self._optionalParams = {} self._optionalParams[key] = {"allowedValues": allowedValues, "defaultValue": orgDefault} if default: default = self._check_param_type(default, key, type) if type == "bool": type = [] self._check_param_value(default, key, allowedValues) return default, args def _check_param_type(self, value, name, type): """ checks if value can be converted / parsed to type will raise an error on failure or will convert it to type and return new converted value can check for: - int: will be converted into int - bool: will be converted to False / True - list: will always return a list - string: will do nothing for now - ignore: will ignore it, just like "string" """ error = False if type == "int": if _is_int(value): value = int(value) else: error = True elif type == "bool": if value in ("0", "1"): value = bool(int(value)) elif value in ("true", "True", "TRUE"): value = True elif value in ("false", "False", "FALSE"): value = False else: error = True elif type == "list": value = value.split("|") elif type == "string": pass elif type == "ignore": pass else: logger.log(u"API :: Invalid param type set " + str(type) + " can not check or convert ignoring it", logger.ERROR) if error: # this is a real ApiError !! 
raise ApiError(u"param: '" + str(name) + "' with given value: '" + str(value) + "' could not be parsed into '" + str(type) + "'") return value def _check_param_value(self, value, name, allowedValues): """ will check if value (or all values in it ) are in allowed values will raise an exception if value is "out of range" if bool(allowedValue) == False a check is not performed and all values are excepted """ if allowedValues: error = False if isinstance(value, list): for item in value: if not item in allowedValues: error = True else: if not value in allowedValues: error = True if error: # this is kinda a ApiError but raising an error is the only way of quitting here raise ApiError(u"param: '" + str(name) + "' with given value: '" + str(value) + "' is out of allowed range '" + str(allowedValues) + "'") class TVDBShorthandWrapper(ApiCall): _help = {"desc": "this is an internal function wrapper. call the help command directly for more information"} def __init__(self, args, kwargs, sid): self.origArgs = args self.kwargs = kwargs self.sid = sid self.s, args = self.check_params(args, kwargs, "s", None, False, "ignore", []) self.e, args = self.check_params(args, kwargs, "e", None, False, "ignore", []) self.args = args ApiCall.__init__(self, args, kwargs) def run(self): """ internal function wrapper """ args = (self.sid,) + self.origArgs if self.e: return CMD_Episode(args, self.kwargs).run() elif self.s: return CMD_ShowSeasons(args, self.kwargs).run() else: return CMD_Show(args, self.kwargs).run() ################################ # helper functions # ################################ def _sizeof_fmt(num): for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.00: return "%3.2f %s" % (num, x) num /= 1024.00 def _is_int(data): try: int(data) except (TypeError, ValueError, OverflowError): return False else: return True def _rename_element(dict, oldKey, newKey): try: dict[newKey] = dict[oldKey] del dict[oldKey] except (ValueError, TypeError, NameError): pass return dict def 
_responds(result_type, data=None, msg=""): """ result is a string of given "type" (success/failure/timeout/error) message is a human readable string, can be empty data is either a dict or a array, can be a empty dict or empty array """ if data is None: data = {} return {"result": result_type_map[result_type], "message": msg, "data": data} def _get_quality_string(q): qualityString = "Custom" if q in qualityPresetStrings: qualityString = qualityPresetStrings[q] elif q in Quality.qualityStrings: qualityString = Quality.qualityStrings[q] return qualityString def _get_status_Strings(s): return statusStrings[s] def _ordinal_to_dateTimeForm(ordinal): # workaround for episodes with no airdate if int(ordinal) != 1: date = datetime.date.fromordinal(ordinal) else: return "" return date.strftime(dateTimeFormat) def _ordinal_to_dateForm(ordinal): if int(ordinal) != 1: date = datetime.date.fromordinal(ordinal) else: return "" return date.strftime(dateFormat) def _historyDate_to_dateTimeForm(timeString): date = datetime.datetime.strptime(timeString, history.dateFormat) return date.strftime(dateTimeFormat) def _replace_statusStrings_with_statusCodes(statusStrings): statusCodes = [] if "snatched" in statusStrings: statusCodes += Quality.SNATCHED if "downloaded" in statusStrings: statusCodes += Quality.DOWNLOADED if "skipped" in statusStrings: statusCodes.append(SKIPPED) if "wanted" in statusStrings: statusCodes.append(WANTED) if "archived" in statusStrings: statusCodes.append(ARCHIVED) if "ignored" in statusStrings: statusCodes.append(IGNORED) if "unaired" in statusStrings: statusCodes.append(UNAIRED) return statusCodes def _mapQuality(showObj): quality_map = _getQualityMap() anyQualities = [] bestQualities = [] iqualityID, aqualityID = Quality.splitQuality(int(showObj)) if iqualityID: for quality in iqualityID: anyQualities.append(quality_map[quality]) if aqualityID: for quality in aqualityID: bestQualities.append(quality_map[quality]) return anyQualities, bestQualities def 
_getQualityMap(): return {Quality.SDTV: 'sdtv', Quality.SDDVD: 'sddvd', Quality.HDTV: 'hdtv', Quality.RAWHDTV: 'rawhdtv', Quality.FULLHDTV: 'fullhdtv', Quality.HDWEBDL: 'hdwebdl', Quality.FULLHDWEBDL: 'fullhdwebdl', Quality.HDBLURAY: 'hdbluray', Quality.FULLHDBLURAY: 'fullhdbluray', Quality.UNKNOWN: 'unknown'} def _getRootDirs(): if sickbeard.ROOT_DIRS == "": return {} rootDir = {} root_dirs = sickbeard.ROOT_DIRS.split('|') default_index = int(sickbeard.ROOT_DIRS.split('|')[0]) rootDir["default_index"] = int(sickbeard.ROOT_DIRS.split('|')[0]) # remove default_index value from list (this fixes the offset) root_dirs.pop(0) if len(root_dirs) < default_index: return {} # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [urllib.unquote_plus(x) for x in root_dirs] default_dir = root_dirs[default_index] dir_list = [] for root_dir in root_dirs: valid = 1 try: ek.ek(os.listdir, root_dir) except: valid = 0 default = 0 if root_dir is default_dir: default = 1 curDir = {} curDir['valid'] = valid curDir['location'] = root_dir curDir['default'] = default dir_list.append(curDir) return dir_list class ApiError(Exception): "Generic API error" class IntParseError(Exception): "A value could not be parsed into a int. 
But should be parsable to a int " class CMD_Help(ApiCall): _help = {"desc": "display help information for a given subject/command", "optionalParameters": {"subject": {"desc": "command - the top level command"}, } } def __init__(self, args, kwargs): # required # optional self.subject, args = self.check_params(args, kwargs, "subject", "help", False, "string", _functionMaper.keys()) ApiCall.__init__(self, args, kwargs) def run(self): """ display help information for a given subject/command """ if self.subject in _functionMaper: out = _responds(RESULT_SUCCESS, _functionMaper.get(self.subject)((), {"help": 1}).run()) else: out = _responds(RESULT_FAILURE, msg="No such cmd") return out class CMD_ComingEpisodes(ApiCall): _help = {"desc": "display the coming episodes", "optionalParameters": {"sort": {"desc": "change the sort order"}, "type": {"desc": "one or more of allowedValues separated by |"}, "paused": {"desc": "0 to exclude paused shows, 1 to include them, or omitted to use the SB default"}, } } def __init__(self, args, kwargs): # required # optional self.sort, args = self.check_params(args, kwargs, "sort", "date", False, "string", ["date", "show", "network"]) self.type, args = self.check_params(args, kwargs, "type", "today|missed|soon|later", False, "list", ["missed", "later", "today", "soon"]) self.paused, args = self.check_params(args, kwargs, "paused", sickbeard.COMING_EPS_DISPLAY_PAUSED, False, "int", [0, 1]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display the coming episodes """ today = datetime.date.today().toordinal() next_week = (datetime.date.today() + datetime.timedelta(days=7)).toordinal() recently = (datetime.date.today() - datetime.timedelta(days=3)).toordinal() done_show_list = [] qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED] myDB = db.DBConnection(row_type="dict") sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid 
AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season > 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, next_week] + qualList) for cur_result in sql_results: done_show_list.append(int(cur_result["tvdbid"])) more_sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes outer_eps, tv_shows WHERE season > 0 AND showid NOT IN (" + ','.join(['?'] * len(done_show_list)) + ") AND tv_shows.tvdb_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season > 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED) sql_results += more_sql_results more_sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season > 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? 
AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList) sql_results += more_sql_results # sort by air date sorts = { 'date': (lambda x, y: cmp(int(x["airdate"]), int(y["airdate"]))), 'show': (lambda a, b: cmp(a["show_name"], b["show_name"])), 'network': (lambda a, b: cmp(a["network"], b["network"])), } sql_results.sort(sorts[self.sort]) finalEpResults = {} # add all requested types or all for curType in self.type: finalEpResults[curType] = [] for ep in sql_results: """ Missed: yesterday... (less than 1week) Today: today Soon: tomorrow till next week Later: later than next week """ if ep["paused"] and not self.paused: continue status = "soon" if ep["airdate"] < today: status = "missed" elif ep["airdate"] >= next_week: status = "later" elif ep["airdate"] >= today and ep["airdate"] < next_week: if ep["airdate"] == today: status = "today" else: status = "soon" # skip unwanted if self.type is not None and not status in self.type: continue ordinalAirdate = int(ep["airdate"]) if not ep["network"]: ep["network"] = "" ep["airdate"] = _ordinal_to_dateForm(ordinalAirdate) ep["quality"] = _get_quality_string(ep["quality"]) # clean up tvdb horrible airs field ep["airs"] = str(ep["airs"]).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ') # start day of the week on 1 (monday) ep["weekday"] = 1 + datetime.date.fromordinal(ordinalAirdate).weekday() # TODO: check if this obsolete if not status in finalEpResults: finalEpResults[status] = [] finalEpResults[status].append(ep) myDB.connection.close() return _responds(RESULT_SUCCESS, finalEpResults) class CMD_Episode(ApiCall): _help = {"desc": "display detailed info about an episode", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, "season": {"desc": "the season number"}, "episode": {"desc": "the episode number"} }, "optionalParameters": {"full_path": {"desc": "show the full absolute path (if valid) instead of a relative path for the episode 
location"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) self.s, args = self.check_params(args, kwargs, "season", None, True, "int", []) self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", []) # optional self.fullPath, args = self.check_params(args, kwargs, "full_path", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display detailed info about an episode """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") myDB = db.DBConnection(row_type="dict") sqlResults = myDB.select("SELECT name, description, airdate, status, location, file_size, release_name FROM tv_episodes WHERE showid = ? AND episode = ? AND season = ?", [self.tvdbid, self.e, self.s]) if not len(sqlResults) == 1: raise ApiError("Episode not found") episode = sqlResults[0] # handle path options # absolute vs relative vs broken showPath = None try: showPath = showObj.location except sickbeard.exceptions.ShowDirNotFoundException: pass if bool(self.fullPath) is True and showPath: pass elif bool(self.fullPath) is False and showPath: # using the length because lstrip removes to much showPathLength = len(showPath) + 1 # the / or \ yeah not that nice i know episode["location"] = episode["location"][showPathLength:] elif not showPath: # show dir is broken ... 
episode path will be empty episode["location"] = "" # convert stuff to human form episode["airdate"] = _ordinal_to_dateForm(episode["airdate"]) status, quality = Quality.splitCompositeStatus(int(episode["status"])) episode["status"] = _get_status_Strings(status) episode["quality"] = _get_quality_string(quality) if episode["file_size"]: episode["file_size_human"] = _sizeof_fmt(episode["file_size"]) else: episode["file_size_human"] = "" myDB.connection.close() return _responds(RESULT_SUCCESS, episode) class CMD_EpisodeSearch(ApiCall): _help = {"desc": "search for an episode. the response might take some time", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, "season": {"desc": "the season number"}, "episode": {"desc": "the episode number"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) self.s, args = self.check_params(args, kwargs, "season", None, True, "int", []) self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ search for an episode """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") # retrieve the episode object and fail if we can't get one epObj = showObj.getEpisode(int(self.s), int(self.e)) if isinstance(epObj, str): return _responds(RESULT_FAILURE, msg="Episode not found") # make a queue item for it and put it on the queue ep_queue_item = search_queue.ManualSearchQueueItem(epObj) sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) # @UndefinedVariable # wait until the queue item tells us whether it worked or not while ep_queue_item.success is None: # @UndefinedVariable time.sleep(1) # return the correct json value if ep_queue_item.success: status, quality = Quality.splitCompositeStatus(epObj.status) # 
@UnusedVariable # TODO: split quality and status? return _responds(RESULT_SUCCESS, {"quality": _get_quality_string(quality)}, "Snatched (" + _get_quality_string(quality) + ")") return _responds(RESULT_FAILURE, msg='Unable to find episode') class CMD_EpisodeSetStatus(ApiCall): _help = {"desc": "set status of an episode or season (when no ep is provided)", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, "season": {"desc": "the season number"}, "status": {"desc": "the status values: wanted, skipped, archived, ignored"} }, "optionalParameters": {"episode": {"desc": "the episode number"}, "force": {"desc": "should we replace existing (downloaded) episodes or not"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) self.s, args = self.check_params(args, kwargs, "season", None, True, "int", []) self.status, args = self.check_params(args, kwargs, "status", None, True, "string", ["wanted", "skipped", "archived", "ignored"]) # optional self.e, args = self.check_params(args, kwargs, "episode", None, False, "int", []) self.force, args = self.check_params(args, kwargs, "force", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set status of an episode or a season (when no ep is provided) """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") # convert the string status to a int for status in statusStrings.statusStrings: if str(statusStrings[status]).lower() == str(self.status).lower(): self.status = status break # if we dont break out of the for loop we got here. else: # the allowed values has at least one item that could not be matched against the internal status strings raise ApiError("The status string could not be matched to a status. 
Report to Devs!") ep_list = [] if self.e: epObj = showObj.getEpisode(self.s, self.e) if epObj is None: return _responds(RESULT_FAILURE, msg="Episode not found") ep_list = [epObj] else: # get all episode numbers frome self,season ep_list = showObj.getAllEpisodes(season=self.s) def _epResult(result_code, ep, msg=""): return {'season': ep.season, 'episode': ep.episode, 'status': _get_status_Strings(ep.status), 'result': result_type_map[result_code], 'message': msg} ep_results = [] failure = False start_backlog = False ep_segment = None for epObj in ep_list: if ep_segment is None and self.status == WANTED: # figure out what segment the episode is in and remember it so we can backlog it if showObj.air_by_date: ep_segment = str(epObj.airdate)[:7] else: ep_segment = epObj.season with epObj.lock: # don't let them mess up UNAIRED episodes if epObj.status == UNAIRED: # setting the status of a unaired is only considert a failure if we directly wanted this episode, but is ignored on a season request if self.e is not None: ep_results.append(_epResult(RESULT_FAILURE, epObj, "Refusing to change status because it is UNAIRED")) failure = True continue # allow the user to force setting the status for an already downloaded episode if epObj.status in Quality.DOWNLOADED and not self.force: ep_results.append(_epResult(RESULT_FAILURE, epObj, "Refusing to change status because it is already marked as DOWNLOADED")) failure = True continue epObj.status = self.status epObj.saveToDB() if self.status == WANTED: start_backlog = True ep_results.append(_epResult(RESULT_SUCCESS, epObj)) extra_msg = "" if start_backlog: cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, ep_segment) sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) # @UndefinedVariable logger.log(u"API :: Starting backlog for " + showObj.name + " season " + str(ep_segment) + " because some episodes were set to WANTED") extra_msg = " Backlog started" if failure: return _responds(RESULT_FAILURE, 
ep_results, 'Failed to set all or some status. Check data.' + extra_msg) else: return _responds(RESULT_SUCCESS, msg='All status set successfully.' + extra_msg) class CMD_Exceptions(ApiCall): _help = {"desc": "display scene exceptions for all or a given show", "optionalParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required # optional self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, False, "int", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display scene exceptions for all or a given show """ myDB = db.DBConnection("cache.db", row_type="dict") if self.tvdbid is None: sqlResults = myDB.select("SELECT show_name, tvdb_id AS 'tvdbid' FROM scene_exceptions") scene_exceptions = {} for row in sqlResults: tvdbid = row["tvdbid"] if not tvdbid in scene_exceptions: scene_exceptions[tvdbid] = [] scene_exceptions[tvdbid].append(row["show_name"]) else: showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") sqlResults = myDB.select("SELECT show_name, tvdb_id AS 'tvdbid' FROM scene_exceptions WHERE tvdb_id = ?", [self.tvdbid]) scene_exceptions = [] for row in sqlResults: scene_exceptions.append(row["show_name"]) myDB.connection.close() return _responds(RESULT_SUCCESS, scene_exceptions) class CMD_History(ApiCall): _help = {"desc": "display sickbeard downloaded/snatched history", "optionalParameters": {"limit": {"desc": "limit returned results"}, "type": {"desc": "only show a specific type of results"}, } } def __init__(self, args, kwargs): # required # optional self.limit, args = self.check_params(args, kwargs, "limit", 100, False, "int", []) self.type, args = self.check_params(args, kwargs, "type", None, False, "string", ["downloaded", "snatched"]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display sickbeard downloaded/snatched 
history """ typeCodes = [] if self.type == "downloaded": self.type = "Downloaded" typeCodes = Quality.DOWNLOADED elif self.type == "snatched": self.type = "Snatched" typeCodes = Quality.SNATCHED else: typeCodes = Quality.SNATCHED + Quality.DOWNLOADED myDB = db.DBConnection(row_type="dict") ulimit = min(int(self.limit), 100) if ulimit == 0: sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id AND action in (" + ','.join(['?'] * len(typeCodes)) + ") ORDER BY date DESC", typeCodes) else: sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id AND action in (" + ','.join(['?'] * len(typeCodes)) + ") ORDER BY date DESC LIMIT ?", typeCodes + [ulimit]) results = [] for row in sqlResults: status, quality = Quality.splitCompositeStatus(int(row["action"])) status = _get_status_Strings(status) if self.type and not status == self.type: continue row["status"] = status row["quality"] = _get_quality_string(quality) row["date"] = _historyDate_to_dateTimeForm(str(row["date"])) del row["action"] _rename_element(row, "showid", "tvdbid") row["resource_path"] = os.path.dirname(row["resource"]) row["resource"] = os.path.basename(row["resource"]) results.append(row) myDB.connection.close() return _responds(RESULT_SUCCESS, results) class CMD_HistoryClear(ApiCall): _help = {"desc": "clear sickbeard's history", } def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ clear sickbeard's history """ myDB = db.DBConnection() myDB.action("DELETE FROM history WHERE 1=1") myDB.action("VACUUM") myDB.connection.close() return _responds(RESULT_SUCCESS, msg="History cleared") class CMD_HistoryTrim(ApiCall): _help = {"desc": "trim sickbeard's history by removing entries greater than 30 days old" } def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ trim 
sickbeard's history """ myDB = db.DBConnection() myDB.action("DELETE FROM history WHERE date < " + str((datetime.datetime.today() - datetime.timedelta(days=30)).strftime(history.dateFormat))) myDB.action("VACUUM") myDB.connection.close() return _responds(RESULT_SUCCESS, msg="Removed history entries greater than 30 days old") class CMD_Logs(ApiCall): _help = {"desc": "view sickbeard's log", "optionalParameters": {"min_level ": {"desc": "the minimum level classification of log entries to show, with each level inherting its above level"} } } def __init__(self, args, kwargs): # required # optional self.min_level, args = self.check_params(args, kwargs, "min_level", "error", False, "string", ["error", "warning", "info", "debug"]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ view sickbeard's log """ # 10 = Debug / 20 = Info / 30 = Warning / 40 = Error minLevel = logger.reverseNames[str(self.min_level).upper()] data = [] if os.path.isfile(logger.sb_log_instance.log_file_path): with ek.ek(open, logger.sb_log_instance.log_file_path) as f: data = f.readlines() regex = "^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$" finalData = [] numLines = 0 lastLine = False numToShow = min(50, len(data)) for x in reversed(data): x = x.decode('utf-8') match = re.match(regex, x) if match: level = match.group(7) if level not in logger.reverseNames: lastLine = False continue if logger.reverseNames[level] >= minLevel: lastLine = True finalData.append(x.rstrip("\n")) else: lastLine = False continue elif lastLine: finalData.append("AA" + x) numLines += 1 if numLines >= numToShow: break return _responds(RESULT_SUCCESS, finalData) class CMD_PostProcess(ApiCall): _help = {"desc": "Manual postprocess TV Download Dir", "optionalParameters": {"path": {"desc": "Post process this folder"}, "force_replace": {"desc": "Overwrite files"}, "return_data": {"desc": "Returns result for the process"} } } def __init__(self, args, kwargs): # 
required # optional self.path, args = self.check_params(args, kwargs, "path", None, False, "string", []) self.force_replace, args = self.check_params(args, kwargs, "force_replace", 0, False, "bool", []) self.return_data, args = self.check_params(args, kwargs, "return_data", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ Starts the postprocess """ pp_options = {} if not self.path and not sickbeard.TV_DOWNLOAD_DIR: return _responds(RESULT_FAILURE, msg="You need to provide a path or set TV Download Dir") if not self.path: self.path = sickbeard.TV_DOWNLOAD_DIR if bool(self.force_replace): pp_options["force_replace"] = True data = processTV.processDir(self.path, method="Manual", pp_options=pp_options) if not self.return_data: data = "" return _responds(RESULT_SUCCESS, data=data, msg="Started postprocess for %s" % self.path) class CMD_SickBeard(ApiCall): _help = {"desc": "display misc sickbeard related information"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display misc sickbeard related information """ data = {"sb_version": sickbeard.version.SICKBEARD_VERSION, "api_version": Api.version, "api_commands": sorted(_functionMaper.keys())} return _responds(RESULT_SUCCESS, data) class CMD_SickBeardAddRootDir(ApiCall): _help = {"desc": "add a sickbeard user's parent directory", "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"} }, "optionalParameters": {"default": {"desc": "make the location passed the default root (parent) directory"} } } def __init__(self, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) # optional self.default, args = self.check_params(args, kwargs, "default", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ add a parent directory to sickbeard's config """ 
self.location = urllib.unquote_plus(self.location) location_matched = 0 # dissallow adding/setting an invalid dir if not ek.ek(os.path.isdir, self.location): return _responds(RESULT_FAILURE, msg="Location is invalid") root_dirs = [] if sickbeard.ROOT_DIRS == "": self.default = 1 else: root_dirs = sickbeard.ROOT_DIRS.split('|') index = int(sickbeard.ROOT_DIRS.split('|')[0]) root_dirs.pop(0) # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [urllib.unquote_plus(x) for x in root_dirs] for x in root_dirs: if(x == self.location): location_matched = 1 if (self.default == 1): index = root_dirs.index(self.location) break if(location_matched == 0): if (self.default == 1): index = 0 root_dirs.insert(0, self.location) else: root_dirs.append(self.location) root_dirs_new = [urllib.unquote_plus(x) for x in root_dirs] root_dirs_new.insert(0, index) root_dirs_new = '|'.join(unicode(x) for x in root_dirs_new) sickbeard.ROOT_DIRS = root_dirs_new return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directories updated") class CMD_SickBeardCheckScheduler(ApiCall): _help = {"desc": "query the scheduler"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ query the scheduler """ myDB = db.DBConnection() sqlResults = myDB.select("SELECT last_backlog FROM info") backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() # @UndefinedVariable backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() # @UndefinedVariable searchStatus = sickbeard.currentSearchScheduler.action.amActive # @UndefinedVariable nextSearch = str(sickbeard.currentSearchScheduler.timeLeft()).split('.')[0] nextBacklog = sickbeard.backlogSearchScheduler.nextRun().strftime(dateFormat).decode(sickbeard.SYS_ENCODING) myDB.connection.close() data = {"backlog_is_paused": int(backlogPaused), "backlog_is_running": int(backlogRunning), "last_backlog": 
_ordinal_to_dateForm(sqlResults[0]["last_backlog"]), "search_is_running": int(searchStatus), "next_search": nextSearch, "next_backlog": nextBacklog} return _responds(RESULT_SUCCESS, data) class CMD_SickBeardDeleteRootDir(ApiCall): _help = {"desc": "delete a sickbeard user's parent directory", "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"} } } def __init__(self, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ delete a parent directory from sickbeard's config """ if sickbeard.ROOT_DIRS == "": return _responds(RESULT_FAILURE, _getRootDirs(), msg="No root directories detected") root_dirs_new = [] root_dirs = sickbeard.ROOT_DIRS.split('|') index = int(root_dirs[0]) root_dirs.pop(0) # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [urllib.unquote_plus(x) for x in root_dirs] old_root_dir = root_dirs[index] for curRootDir in root_dirs: if not curRootDir == self.location: root_dirs_new.append(curRootDir) else: newIndex = 0 for curIndex, curNewRootDir in enumerate(root_dirs_new): if curNewRootDir is old_root_dir: newIndex = curIndex break root_dirs_new = [urllib.unquote_plus(x) for x in root_dirs_new] if len(root_dirs_new) > 0: root_dirs_new.insert(0, newIndex) root_dirs_new = "|".join(unicode(x) for x in root_dirs_new) sickbeard.ROOT_DIRS = root_dirs_new # what if the root dir was not found? 
return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directory deleted") class CMD_SickBeardForceSearch(ApiCall): _help = {"desc": "force the episode search early" } def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ force the episode search early """ # Changing all old missing episodes to status WANTED # Beginning search for new episodes on RSS # Searching all providers for any needed episodes result = sickbeard.currentSearchScheduler.forceRun() if result: return _responds(RESULT_SUCCESS, msg="Episode search forced") return _responds(RESULT_FAILURE, msg="Can not search for episode") class CMD_SickBeardGetDefaults(ApiCall): _help = {"desc": "get sickbeard user defaults"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get sickbeard user defaults """ anyQualities, bestQualities = _mapQuality(sickbeard.QUALITY_DEFAULT) data = {"status": statusStrings[sickbeard.STATUS_DEFAULT].lower(), "flatten_folders": int(sickbeard.FLATTEN_FOLDERS_DEFAULT), "initial": anyQualities, "archive": bestQualities, "future_show_paused": int(sickbeard.COMING_EPS_DISPLAY_PAUSED) } return _responds(RESULT_SUCCESS, data) class CMD_SickBeardGetMessages(ApiCall): _help = {"desc": "get all messages"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): messages = [] for cur_notification in ui.notifications.get_notifications(): messages.append({"title": cur_notification.title, "message": cur_notification.message, "type": cur_notification.type}) return _responds(RESULT_SUCCESS, messages) class CMD_SickBeardGetRootDirs(ApiCall): _help = {"desc": "get sickbeard user parent directories"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get the parent directories 
defined in sickbeard's config """ return _responds(RESULT_SUCCESS, _getRootDirs()) class CMD_SickBeardPauseBacklog(ApiCall): _help = {"desc": "pause the backlog search", "optionalParameters": {"pause ": {"desc": "pause or unpause the global backlog"} } } def __init__(self, args, kwargs): # required # optional self.pause, args = self.check_params(args, kwargs, "pause", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ pause the backlog search """ if self.pause is True: sickbeard.searchQueueScheduler.action.pause_backlog() # @UndefinedVariable return _responds(RESULT_SUCCESS, msg="Backlog paused") else: sickbeard.searchQueueScheduler.action.unpause_backlog() # @UndefinedVariable return _responds(RESULT_SUCCESS, msg="Backlog unpaused") class CMD_SickBeardPing(ApiCall): _help = {"desc": "check to see if sickbeard is running"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ check to see if sickbeard is running """ cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" if sickbeard.started: return _responds(RESULT_SUCCESS, {"pid": sickbeard.PID}, "Pong") else: return _responds(RESULT_SUCCESS, msg="Pong") class CMD_SickBeardRestart(ApiCall): _help = {"desc": "restart sickbeard"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ restart sickbeard """ threading.Timer(2, sickbeard.invoke_restart, [False]).start() return _responds(RESULT_SUCCESS, msg="SickBeard is restarting...") class CMD_SickBeardSearchTVDB(ApiCall): _help = {"desc": "search for show at tvdb with a given string and language", "optionalParameters": {"name": {"desc": "name of the show you want to search for"}, "tvdbid": {"desc": "thetvdb.com unique id of a show"}, "lang": {"desc": "the 2 letter abbreviation lang id"} } } valid_languages = { 'el': 20, 'en': 7, 'zh': 27, 
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30} def __init__(self, args, kwargs): # required # optional self.name, args = self.check_params(args, kwargs, "name", None, False, "string", []) self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, False, "int", []) self.lang, args = self.check_params(args, kwargs, "lang", "en", False, "string", self.valid_languages.keys()) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ search for show at tvdb with a given string and language """ # only name was given if self.name and not self.tvdbid: baseURL = "http://thetvdb.com/api/GetSeries.php?" params = {"seriesname": str(self.name).encode('utf-8'), 'language': self.lang} finalURL = baseURL + urllib.urlencode(params) urlData = sickbeard.helpers.getURL(finalURL) if urlData is None: return _responds(RESULT_FAILURE, msg="Did not get result from tvdb") else: try: seriesXML = etree.ElementTree(etree.XML(urlData)) except Exception, e: logger.log(u"API :: Unable to parse XML for some reason: " + ex(e) + " from XML: " + urlData, logger.ERROR) return _responds(RESULT_FAILURE, msg="Unable to read result from tvdb") series = seriesXML.getiterator('Series') results = [] for curSeries in series: results.append({"tvdbid": int(curSeries.findtext('seriesid')), "name": curSeries.findtext('SeriesName'), "first_aired": curSeries.findtext('FirstAired')}) lang_id = self.valid_languages[self.lang] return _responds(RESULT_SUCCESS, {"results": results, "langid": lang_id}) elif self.tvdbid: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() lang_id = self.valid_languages[self.lang] if self.lang and not self.lang == 'en': ltvdb_api_parms['language'] = self.lang t = tvdb_api.Tvdb(actors=False, **ltvdb_api_parms) 
try: myShow = t[int(self.tvdbid)] except (tvdb_exceptions.tvdb_shownotfound, tvdb_exceptions.tvdb_error): logger.log(u"API :: Unable to find show with id " + str(self.tvdbid), logger.WARNING) return _responds(RESULT_SUCCESS, {"results": [], "langid": lang_id}) if not myShow.data['seriesname']: logger.log(u"API :: Found show with tvdbid " + str(self.tvdbid) + ", however it contained no show name", logger.DEBUG) return _responds(RESULT_FAILURE, msg="Show contains no name, invalid result") showOut = [{"tvdbid": self.tvdbid, "name": unicode(myShow.data['seriesname']), "first_aired": myShow.data['firstaired']}] return _responds(RESULT_SUCCESS, {"results": showOut, "langid": lang_id}) else: return _responds(RESULT_FAILURE, msg="Either tvdbid or name is required") class CMD_SickBeardSetDefaults(ApiCall): _help = {"desc": "set sickbeard user defaults", "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "archive": {"desc": "archive quality for the show"}, "flatten_folders": {"desc": "flatten subfolders within the show directory"}, "status": {"desc": "status of missing episodes"} } } def __init__(self, args, kwargs): # required # optional self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) self.future_show_paused, args = self.check_params(args, kwargs, "future_show_paused", None, False, "bool", []) self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", None, False, "bool", []) self.status, args = self.check_params(args, kwargs, "status", None, False, "string", ["wanted", "skipped", "archived", "ignored"]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set sickbeard user 
defaults """ quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: sickbeard.QUALITY_DEFAULT = Quality.combineQualities(iqualityID, aqualityID) if self.status: # convert the string status to a int for status in statusStrings.statusStrings: if statusStrings[status].lower() == str(self.status).lower(): self.status = status break # this should be obsolete bcause of the above if not self.status in statusStrings.statusStrings: raise ApiError("Invalid Status") # only allow the status options we want if int(self.status) not in (3, 5, 6, 7): raise ApiError("Status Prohibited") sickbeard.STATUS_DEFAULT = self.status if self.flatten_folders is not None: sickbeard.FLATTEN_FOLDERS_DEFAULT = int(self.flatten_folders) if self.future_show_paused is not None: sickbeard.COMING_EPS_DISPLAY_PAUSED = int(self.future_show_paused) return _responds(RESULT_SUCCESS, msg="Saved defaults") class CMD_SickBeardShutdown(ApiCall): _help = {"desc": "shutdown sickbeard"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ shutdown sickbeard """ threading.Timer(2, sickbeard.invoke_shutdown).start() return _responds(RESULT_SUCCESS, msg="SickBeard is shutting down...") class CMD_Show(ApiCall): _help = {"desc": "display information for a given show", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, 
"int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display information for a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") showDict = {} showDict["season_list"] = CMD_ShowSeasonList((), {"tvdbid": self.tvdbid}).run()["data"] showDict["cache"] = CMD_ShowCache((), {"tvdbid": self.tvdbid}).run()["data"] genreList = [] if showObj.genre: genreListTmp = showObj.genre.split("|") for genre in genreListTmp: if genre: genreList.append(genre) showDict["genre"] = genreList showDict["quality"] = _get_quality_string(showObj.quality) anyQualities, bestQualities = _mapQuality(showObj.quality) showDict["quality_details"] = {"initial": anyQualities, "archive": bestQualities} try: showDict["location"] = showObj.location except sickbeard.exceptions.ShowDirNotFoundException: showDict["location"] = "" showDict["language"] = showObj.lang showDict["show_name"] = showObj.name showDict["paused"] = showObj.paused showDict["air_by_date"] = showObj.air_by_date showDict["flatten_folders"] = showObj.flatten_folders # clean up tvdb horrible airs field showDict["airs"] = str(showObj.airs).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ') showDict["tvrage_id"] = showObj.tvrid showDict["tvrage_name"] = showObj.tvrname showDict["network"] = showObj.network if not showDict["network"]: showDict["network"] = "" showDict["status"] = showObj.status nextAirdate = '' nextEps = showObj.nextEpisode() if (len(nextEps) != 0): nextAirdate = _ordinal_to_dateForm(nextEps[0].airdate.toordinal()) showDict["next_ep_airdate"] = nextAirdate return _responds(RESULT_SUCCESS, showDict) class CMD_ShowAddExisting(ApiCall): _help = {"desc": "add a show in sickbeard with an existing folder", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, "location": {"desc": "full path to the existing folder for the show"} }, 
"optionalParameters": {"initial": {"desc": "initial quality for the show"}, "archive": {"desc": "archive quality for the show"}, "flatten_folders": {"desc": "flatten subfolders for the show"} } } def __init__(self, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", str(sickbeard.FLATTEN_FOLDERS_DEFAULT), False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ add a show in sickbeard with an existing folder """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if showObj: return _responds(RESULT_FAILURE, msg="An existing tvdbid already exists in the database") if not ek.ek(os.path.isdir, self.location): return _responds(RESULT_FAILURE, msg='Not a valid location') tvdbName = None tvdbResult = CMD_SickBeardSearchTVDB([], {"tvdbid": self.tvdbid}).run() if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]: if not tvdbResult['data']['results']: return _responds(RESULT_FAILURE, msg="Empty results returned, check tvdbid and try again") if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]: tvdbName = tvdbResult['data']['results'][0]['name'] if not tvdbName: return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb") quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 
'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} # use default quality as a failsafe newQuality = int(sickbeard.QUALITY_DEFAULT) iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: newQuality = Quality.combineQualities(iqualityID, aqualityID) sickbeard.showQueueScheduler.action.addShow(int(self.tvdbid), self.location, SKIPPED, newQuality, int(self.flatten_folders)) # @UndefinedVariable return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " has been queued to be added") class CMD_ShowAddNew(ApiCall): _help = {"desc": "add a new show to sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} }, "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "location": {"desc": "base path for where the show folder is to be created"}, "archive": {"desc": "archive quality for the show"}, "flatten_folders": {"desc": "flatten subfolders for the show"}, "status": {"desc": "status of missing episodes"}, "lang": {"desc": "the 2 letter lang abbreviation id"} } } valid_languages = { 'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30} def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.location, args = self.check_params(args, kwargs, "location", None, False, "string", []) self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", 
"fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", str(sickbeard.FLATTEN_FOLDERS_DEFAULT), False, "bool", []) self.status, args = self.check_params(args, kwargs, "status", None, False, "string", ["wanted", "skipped", "archived", "ignored"]) self.lang, args = self.check_params(args, kwargs, "lang", "en", False, "string", self.valid_languages.keys()) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ add a show in sickbeard with an existing folder """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if showObj: return _responds(RESULT_FAILURE, msg="An existing tvdbid already exists in database") if not self.location: if sickbeard.ROOT_DIRS != "": root_dirs = sickbeard.ROOT_DIRS.split('|') root_dirs.pop(0) default_index = int(sickbeard.ROOT_DIRS.split('|')[0]) self.location = root_dirs[default_index] else: return _responds(RESULT_FAILURE, msg="Root directory is not set, please provide a location") if not ek.ek(os.path.isdir, self.location): return _responds(RESULT_FAILURE, msg="'" + self.location + "' is not a valid location") quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} # use default quality as a failsafe newQuality = int(sickbeard.QUALITY_DEFAULT) iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: newQuality = 
Quality.combineQualities(iqualityID, aqualityID) # use default status as a failsafe newStatus = sickbeard.STATUS_DEFAULT if self.status: # convert the string status to a int for status in statusStrings.statusStrings: if statusStrings[status].lower() == str(self.status).lower(): self.status = status break # TODO: check if obsolete if not self.status in statusStrings.statusStrings: raise ApiError("Invalid Status") # only allow the status options we want if int(self.status) not in (3, 5, 6, 7): return _responds(RESULT_FAILURE, msg="Status prohibited") newStatus = self.status tvdbName = None tvdbResult = CMD_SickBeardSearchTVDB([], {"tvdbid": self.tvdbid}).run() if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]: if not tvdbResult['data']['results']: return _responds(RESULT_FAILURE, msg="Empty results returned, check tvdbid and try again") if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]: tvdbName = tvdbResult['data']['results'][0]['name'] if not tvdbName: return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb") # moved the logic check to the end in an attempt to eliminate empty directory being created from previous errors showPath = ek.ek(os.path.join, self.location, helpers.sanitizeFileName(tvdbName)) # don't create show dir if config says not to if sickbeard.ADD_SHOWS_WO_DIR: logger.log(u"Skipping initial creation of " + showPath + " due to config.ini setting") else: dir_exists = helpers.makeDir(showPath) if not dir_exists: logger.log(u"API :: Unable to create the folder " + showPath + ", can't add the show", logger.ERROR) return _responds(RESULT_FAILURE, {"path": showPath}, "Unable to create the folder " + showPath + ", can't add the show") else: helpers.chmodAsParent(showPath) sickbeard.showQueueScheduler.action.addShow(int(self.tvdbid), showPath, newStatus, newQuality, int(self.flatten_folders), self.lang) # @UndefinedVariable return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " 
has been queued to be added") class CMD_ShowCache(ApiCall): _help = {"desc": "check sickbeard's cache to see if the banner or poster image for a show is valid", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ check sickbeard's cache to see if the banner or poster image for a show is valid """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") # TODO: catch if cache dir is missing/invalid.. so it doesn't break show/show.cache cache_obj = image_cache.ImageCache() has_poster = 0 has_banner = 0 if ek.ek(os.path.isfile, cache_obj.poster_path(showObj.tvdbid)): has_poster = 1 if ek.ek(os.path.isfile, cache_obj.banner_path(showObj.tvdbid)): has_banner = 1 return _responds(RESULT_SUCCESS, {"poster": has_poster, "banner": has_banner}) class CMD_ShowDelete(ApiCall): _help = {"desc": "delete a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ delete a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): # @UndefinedVariable return _responds(RESULT_FAILURE, msg="Show can not be deleted while being added or updated") showObj.deleteShow() return _responds(RESULT_SUCCESS, msg=u"" + showObj.name + " has been deleted") class 
CMD_ShowGetQuality(ApiCall): _help = {"desc": "get quality setting for a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get quality setting for a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") anyQualities, bestQualities = _mapQuality(showObj.quality) return _responds(RESULT_SUCCESS, {"initial": anyQualities, "archive": bestQualities}) class CMD_ShowGetPoster(ApiCall): _help = {"desc": "get the poster stored for a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get the poster for a show in sickbeard """ return {'outputType': 'image', 'image': webserve.WebInterface().showPoster(self.tvdbid, 'poster')} class CMD_ShowGetBanner(ApiCall): _help = {"desc": "get the banner stored for a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get the banner for a show in sickbeard """ return {'outputType': 'image', 'image': webserve.WebInterface().showPoster(self.tvdbid, 'banner')} class CMD_ShowPause(ApiCall): _help = {"desc": "set a show's paused state in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, }, 
"optionalParameters": {"pause": {"desc": "set the pause state of the show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.pause, args = self.check_params(args, kwargs, "pause", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set a show's paused state in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") if self.pause is True: showObj.paused = 1 return _responds(RESULT_SUCCESS, msg=u"" + showObj.name + " has been paused") else: showObj.paused = 0 return _responds(RESULT_SUCCESS, msg=u"" + showObj.name + " has been unpaused") return _responds(RESULT_FAILURE, msg=u"" + showObj.name + " was unable to be paused") class CMD_ShowRefresh(ApiCall): _help = {"desc": "refresh a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ refresh a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") try: sickbeard.showQueueScheduler.action.refreshShow(showObj) # @UndefinedVariable return _responds(RESULT_SUCCESS, msg=u"" + showObj.name + " has queued to be refreshed") except exceptions.CantRefreshException, e: logger.log(u"API:: Unable to refresh " + showObj.name + ". 
" + str(ex(e)), logger.ERROR) return _responds(RESULT_FAILURE, msg=u"Unable to refresh " + showObj.name) class CMD_ShowSeasonList(ApiCall): _help = {"desc": "display the season list for a given show", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, }, "optionalParameters": {"sort": {"desc": "change the sort order from descending to ascending"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.sort, args = self.check_params(args, kwargs, "sort", "desc", False, "string", ["asc", "desc"]) # "asc" and "desc" default and fallback is "desc" # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display the season list for a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") myDB = db.DBConnection(row_type="dict") if self.sort == "asc": sqlResults = myDB.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season ASC", [self.tvdbid]) else: sqlResults = myDB.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? 
ORDER BY season DESC", [self.tvdbid]) # a list with all season numbers seasonList = [] for row in sqlResults: seasonList.append(int(row["season"])) myDB.connection.close() return _responds(RESULT_SUCCESS, seasonList) class CMD_ShowSeasons(ApiCall): _help = {"desc": "display a listing of episodes for all or a given season", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, }, "optionalParameters": {"season": {"desc": "the season number"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.season, args = self.check_params(args, kwargs, "season", None, False, "int", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display a listing of episodes for all or a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") myDB = db.DBConnection(row_type="dict") if self.season is None: sqlResults = myDB.select("SELECT name, episode, airdate, status, season FROM tv_episodes WHERE showid = ?", [self.tvdbid]) seasons = {} for row in sqlResults: status, quality = Quality.splitCompositeStatus(int(row["status"])) row["status"] = _get_status_Strings(status) row["quality"] = _get_quality_string(quality) row["airdate"] = _ordinal_to_dateForm(row["airdate"]) curSeason = int(row["season"]) curEpisode = int(row["episode"]) del row["season"] del row["episode"] if not curSeason in seasons: seasons[curSeason] = {} seasons[curSeason][curEpisode] = row else: sqlResults = myDB.select("SELECT name, episode, airdate, status FROM tv_episodes WHERE showid = ? 
AND season = ?", [self.tvdbid, self.season]) if len(sqlResults) is 0: return _responds(RESULT_FAILURE, msg="Season not found") seasons = {} for row in sqlResults: curEpisode = int(row["episode"]) del row["episode"] status, quality = Quality.splitCompositeStatus(int(row["status"])) row["status"] = _get_status_Strings(status) row["quality"] = _get_quality_string(quality) row["airdate"] = _ordinal_to_dateForm(row["airdate"]) if not curEpisode in seasons: seasons[curEpisode] = {} seasons[curEpisode] = row myDB.connection.close() return _responds(RESULT_SUCCESS, seasons) class CMD_ShowSetQuality(ApiCall): _help = {"desc": "set desired quality of a show in sickbeard. if neither initial or archive are provided then the config default quality will be used", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} }, "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "archive": {"desc": "archive quality for the show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # this for whatever reason removes hdbluray not sdtv... which is just wrong. reverting to previous code.. plus we didnt use the new code everywhere. 
# self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", _getQualityMap().values()[1:]) self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set the quality for a show in sickbeard by taking in a deliminated string of qualities, map to their value and combine for new values """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} # use default quality as a failsafe newQuality = int(sickbeard.QUALITY_DEFAULT) iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: newQuality = Quality.combineQualities(iqualityID, aqualityID) showObj.quality = newQuality return _responds(RESULT_SUCCESS, msg=showObj.name + " quality has been changed to " + _get_quality_string(showObj.quality)) class CMD_ShowStats(ApiCall): _help = {"desc": "display episode statistics for a given show", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", 
[]) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display episode statistics for a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") # show stats episode_status_counts_total = {} episode_status_counts_total["total"] = 0 for status in statusStrings.statusStrings.keys(): if status in [UNKNOWN, DOWNLOADED, SNATCHED, SNATCHED_PROPER]: continue episode_status_counts_total[status] = 0 # add all the downloaded qualities episode_qualities_counts_download = {} episode_qualities_counts_download["total"] = 0 for statusCode in Quality.DOWNLOADED: status, quality = Quality.splitCompositeStatus(statusCode) if quality in [Quality.NONE]: continue episode_qualities_counts_download[statusCode] = 0 # add all snatched qualities episode_qualities_counts_snatch = {} episode_qualities_counts_snatch["total"] = 0 for statusCode in Quality.SNATCHED + Quality.SNATCHED_PROPER: status, quality = Quality.splitCompositeStatus(statusCode) if quality in [Quality.NONE]: continue episode_qualities_counts_snatch[statusCode] = 0 myDB = db.DBConnection(row_type="dict") sqlResults = myDB.select("SELECT status, season, airdate FROM tv_episodes WHERE season > 0 AND episode > 0 AND showid = ?", [self.tvdbid]) # the main loop that goes through all episodes for row in sqlResults: status, quality = Quality.splitCompositeStatus(int(row["status"])) episode_status_counts_total["total"] += 1 if status in Quality.DOWNLOADED: episode_qualities_counts_download["total"] += 1 episode_qualities_counts_download[int(row["status"])] += 1 elif status in Quality.SNATCHED + Quality.SNATCHED_PROPER: episode_qualities_counts_snatch["total"] += 1 episode_qualities_counts_snatch[int(row["status"])] += 1 # we dont count NONE = 0 = N/A elif status == 0 or row["airdate"] == 1: episode_status_counts_total["total"] -= 1 else: episode_status_counts_total[status] += 1 # the 
outgoing container episodes_stats = {} episodes_stats["downloaded"] = {} # turning codes into strings for statusCode in episode_qualities_counts_download: if statusCode is "total": episodes_stats["downloaded"]["total"] = episode_qualities_counts_download[statusCode] continue status, quality = Quality.splitCompositeStatus(int(statusCode)) quality_string = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "") episodes_stats["downloaded"][quality_string] = episode_qualities_counts_download[statusCode] episodes_stats["snatched"] = {} # turning codes into strings for statusCode in episode_qualities_counts_snatch: if statusCode is "total": episodes_stats["snatched"]["total"] = episode_qualities_counts_snatch[statusCode] continue status, quality = Quality.splitCompositeStatus(int(statusCode)) quality_string = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "") # count qualities for snatched and snatched_proper if quality_string in episodes_stats["snatched"]: episodes_stats["snatched"][quality_string] += episode_qualities_counts_snatch[statusCode] else: episodes_stats["snatched"][quality_string] = episode_qualities_counts_snatch[statusCode] # episodes_stats["total"] = {} for statusCode in episode_status_counts_total: if statusCode is "total": episodes_stats["total"] = episode_status_counts_total[statusCode] continue status, quality = Quality.splitCompositeStatus(int(statusCode)) statusString = statusStrings.statusStrings[statusCode].lower().replace(" ", "_").replace("(", "").replace(")", "") episodes_stats[statusString] = episode_status_counts_total[statusCode] myDB.connection.close() return _responds(RESULT_SUCCESS, episodes_stats) class CMD_ShowUpdate(ApiCall): _help = {"desc": "update a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", 
None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ update a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") try: sickbeard.showQueueScheduler.action.updateShow(showObj, True) # @UndefinedVariable return _responds(RESULT_SUCCESS, msg=u"" + showObj.name + " has queued to be updated") except exceptions.CantUpdateException, e: logger.log(u"API:: Unable to update " + showObj.name + ". " + str(ex(e)), logger.ERROR) return _responds(RESULT_FAILURE, msg=u"Unable to update " + showObj.name) class CMD_Shows(ApiCall): _help = {"desc": "display all shows in sickbeard", "optionalParameters": {"sort": {"desc": "sort the list of shows by show name instead of tvdbid"}, "paused": {"desc": "only show the shows that are set to paused"}, }, } def __init__(self, args, kwargs): # required # optional self.sort, args = self.check_params(args, kwargs, "sort", "id", False, "string", ["id", "name"]) self.paused, args = self.check_params(args, kwargs, "paused", None, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display_is_int_multi( self.tvdbid )shows in sickbeard """ shows = {} for curShow in sickbeard.showList: nextAirdate = '' nextEps = curShow.nextEpisode() if (len(nextEps) != 0): nextAirdate = _ordinal_to_dateForm(nextEps[0].airdate.toordinal()) if self.paused is not None and bool(self.paused) != bool(curShow.paused): continue showDict = {"paused": curShow.paused, "quality": _get_quality_string(curShow.quality), "language": curShow.lang, "air_by_date": curShow.air_by_date, "tvdbid": curShow.tvdbid, "tvrage_id": curShow.tvrid, "tvrage_name": curShow.tvrname, "network": curShow.network, "show_name": curShow.name, "status": curShow.status, "next_ep_airdate": nextAirdate} showDict["cache"] = CMD_ShowCache((), {"tvdbid": curShow.tvdbid}).run()["data"] if 
not showDict["network"]: showDict["network"] = "" if self.sort == "name": shows[curShow.name] = showDict else: shows[curShow.tvdbid] = showDict return _responds(RESULT_SUCCESS, shows) class CMD_ShowsStats(ApiCall): _help = {"desc": "display the global shows and episode stats" } def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display the global shows and episode stats """ stats = {} myDB = db.DBConnection() today = str(datetime.date.today().toordinal()) status_snatched = '(' + ','.join([str(quality) for quality in Quality.SNATCHED + Quality.SNATCHED_PROPER]) + ')' status_download = '(' + ','.join([str(quality) for quality in Quality.DOWNLOADED + [ARCHIVED]]) + ')' sql_statement = 'SELECT ' sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND status IN ' + status_snatched + ') AS ep_snatched, ' sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 AND status IN ' + status_download + ') AS ep_downloaded, ' sql_statement += '(SELECT COUNT(*) FROM tv_episodes WHERE season > 0 AND episode > 0 ' sql_statement += ' AND ((airdate > 1 AND airdate <= ' + today + ' AND (status = ' + str(SKIPPED) + ' OR status = ' + str(WANTED) + ')) ' sql_statement += ' OR (status IN ' + status_snatched + ') OR (status IN ' + status_download + '))) AS ep_total ' sql_statement += ' FROM tv_episodes tv_eps LIMIT 1' sql_result = myDB.select(sql_statement) stats["shows_total"] = len(sickbeard.showList) stats["shows_active"] = len([show for show in sickbeard.showList if show.paused == 0 and show.status != "Ended"]) if sql_result: stats["ep_snatched"] = sql_result[0]['ep_snatched'] stats["ep_downloaded"] = sql_result[0]['ep_downloaded'] stats["ep_total"] = sql_result[0]['ep_total'] else: stats["ep_snatched"] = 0 stats["ep_downloaded"] = 0 stats["ep_total"] = 0 myDB.connection.close() return _responds(RESULT_SUCCESS, stats) # WARNING: never define a 
cmd call string that contains a "_" (underscore) # this is reserved for cmd indexes used while cmd chaining # WARNING: never define a param name that contains a "." (dot) # this is reserved for cmd namspaces used while cmd chaining _functionMaper = {"help": CMD_Help, "future": CMD_ComingEpisodes, "episode": CMD_Episode, "episode.search": CMD_EpisodeSearch, "episode.setstatus": CMD_EpisodeSetStatus, "exceptions": CMD_Exceptions, "history": CMD_History, "history.clear": CMD_HistoryClear, "history.trim": CMD_HistoryTrim, "logs": CMD_Logs, "postprocess": CMD_PostProcess, "sb": CMD_SickBeard, "sb.addrootdir": CMD_SickBeardAddRootDir, "sb.checkscheduler": CMD_SickBeardCheckScheduler, "sb.deleterootdir": CMD_SickBeardDeleteRootDir, "sb.forcesearch": CMD_SickBeardForceSearch, "sb.getdefaults": CMD_SickBeardGetDefaults, "sb.getmessages": CMD_SickBeardGetMessages, "sb.getrootdirs": CMD_SickBeardGetRootDirs, "sb.pausebacklog": CMD_SickBeardPauseBacklog, "sb.ping": CMD_SickBeardPing, "sb.restart": CMD_SickBeardRestart, "sb.searchtvdb": CMD_SickBeardSearchTVDB, "sb.setdefaults": CMD_SickBeardSetDefaults, "sb.shutdown": CMD_SickBeardShutdown, "show": CMD_Show, "show.addexisting": CMD_ShowAddExisting, "show.addnew": CMD_ShowAddNew, "show.cache": CMD_ShowCache, "show.delete": CMD_ShowDelete, "show.getquality": CMD_ShowGetQuality, "show.getposter": CMD_ShowGetPoster, "show.getbanner": CMD_ShowGetBanner, "show.pause": CMD_ShowPause, "show.refresh": CMD_ShowRefresh, "show.seasonlist": CMD_ShowSeasonList, "show.seasons": CMD_ShowSeasons, "show.setquality": CMD_ShowSetQuality, "show.stats": CMD_ShowStats, "show.update": CMD_ShowUpdate, "shows": CMD_Shows, "shows.stats": CMD_ShowsStats }
107,938
Python
.py
2,087
39.8184
776
0.584009
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,322
show_name_helpers.py
midgetspy_Sick-Beard/sickbeard/show_name_helpers.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard from sickbeard.common import countryList from sickbeard.helpers import sanitizeSceneName from sickbeard.scene_exceptions import get_scene_exceptions from sickbeard import logger from sickbeard import db import re import datetime from name_parser.parser import NameParser, InvalidNameException resultFilters = ["sub(bed|ed|pack|s)", "(dk|fin|heb|kor|nl|nor|nordic|pl|swe)sub(bed|ed|s)?", "(dir|sample|sub|nfo|proof)fix(es)?", "sample", "(dvd)?extras", "dub(bed)?"] def filterBadReleases(name): """ Filters out non-english and just all-around stupid releases by comparing them to the resultFilters contents. name: the release name to check Returns: True if the release name is OK, False if it's bad. 
""" try: fp = NameParser() parse_result = fp.parse(name) except InvalidNameException: logger.log(u"Unable to parse the filename " + name + " into a valid episode", logger.WARNING) return False # use the extra info and the scene group to filter against check_string = '' if parse_result.extra_info: check_string = parse_result.extra_info if parse_result.release_group: if check_string: check_string = check_string + '-' + parse_result.release_group else: check_string = parse_result.release_group # if there's no info after the season info then assume it's fine if not check_string: return True # if any of the bad strings are in the name then say no for ignore_word in resultFilters + sickbeard.IGNORE_WORDS.split(','): ignore_word = ignore_word.strip() if ignore_word: if re.search('(^|[\W_])' + ignore_word + '($|[\W_])', check_string, re.I): logger.log(u"Invalid scene release: " + name + " contains " + ignore_word + ", ignoring it", logger.DEBUG) return False return True def sceneToNormalShowNames(name): """ Takes a show name from a scene dirname and converts it to a more "human-readable" format. name: The show name to convert Returns: a list of all the possible "normal" names """ if not name: return [] name_list = [name] # use both and and & new_name = re.sub('(?i)([\. ])and([\. ])', '\\1&\\2', name, re.I) if new_name not in name_list: name_list.append(new_name) results = [] for cur_name in name_list: # add brackets around the year results.append(re.sub('(\D)(\d{4})$', '\\1(\\2)', cur_name)) # add brackets around the country country_match_str = '|'.join(countryList.values()) results.append(re.sub('(?i)([. 
_-])(' + country_match_str + ')$', '\\1(\\2)', cur_name)) results += name_list return list(set(results)) def makeSceneShowSearchStrings(show): showNames = allPossibleShowNames(show) # scenify the names return map(sanitizeSceneName, showNames) def makeSceneSeasonSearchString(show, segment, extraSearchType=None): myDB = db.DBConnection() if show.air_by_date: numseasons = 0 # the search string for air by date shows is just seasonStrings = [segment] else: numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [show.tvdbid]) numseasons = int(numseasonsSQlResult[0][0]) seasonStrings = ["S%02d" % segment] showNames = set(makeSceneShowSearchStrings(show)) toReturn = [] # search each show name for curShow in showNames: # most providers all work the same way if not extraSearchType: # if there's only one season then we can just use the show name straight up if numseasons == 1: toReturn.append(curShow) # for providers that don't allow multiple searches in one request we only search for Sxx style stuff else: for cur_season in seasonStrings: toReturn.append(curShow + "." + cur_season) return toReturn def makeSceneSearchString(episode): myDB = db.DBConnection() numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? 
and season != 0", [episode.show.tvdbid]) numseasons = int(numseasonsSQlResult[0][0]) # see if we should use dates instead of episodes if episode.show.air_by_date and episode.airdate != datetime.date.fromordinal(1): epStrings = [str(episode.airdate)] else: epStrings = ["S%02iE%02i" % (int(episode.season), int(episode.episode)), "%ix%02i" % (int(episode.season), int(episode.episode))] # for single-season shows just search for the show name if numseasons == 1: epStrings = [''] showNames = set(makeSceneShowSearchStrings(episode.show)) toReturn = [] for curShow in showNames: for curEpString in epStrings: if curEpString != '': toReturn.append(curShow + '.' + curEpString) else: toReturn.append(curShow) return toReturn def isGoodResult(name, show, log=True): """ Use an automatically-created regex to make sure the result actually is the show it claims to be """ all_show_names = allPossibleShowNames(show) showNames = map(sanitizeSceneName, all_show_names) + all_show_names for curName in set(showNames): escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName)) if show.startyear: escaped_name += "(?:\W+" + str(show.startyear) + ")?" 
curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+)' if log: logger.log(u"Checking if show " + name + " matches " + curRegex, logger.DEBUG) match = re.search(curRegex, name, re.I) if match: logger.log(u"Matched " + curRegex + " to " + name, logger.DEBUG) return True if log: logger.log(u"Provider gave result " + name + " but that doesn't seem like a valid result for " + show.name + " so I'm ignoring it") return False def uniqify(seq, idfun=None): # http://www.peterbe.com/plog/uniqifiers-benchmark if idfun is None: def idfun(x): return x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result def allPossibleShowNames(show): """ Figures out every possible variation of the name for a particular show. Includes TVDB name, TVRage name, country codes on the end, eg. "Show Name (AU)", and any scene exception names. show: a TVShow object that we should get the names of Returns: a list of all the possible show names """ showNames = [show.name] showNames += [name for name in get_scene_exceptions(show.tvdbid)] # if we have a tvrage name then use it if show.tvrname != "" and show.tvrname is not None: showNames.append(show.tvrname) newShowNames = [] country_list = countryList country_list.update(dict(zip(countryList.values(), countryList.keys()))) # if we have "Show Name Australia" or "Show Name (Australia)" this will add "Show Name (AU)" for # any countries defined in common.countryList (and vice versa) for curName in set(showNames): if not curName: continue for curCountry in country_list: if curName.endswith(' ' + curCountry): newShowNames.append(curName.replace(' ' + curCountry, ' (' + country_list[curCountry] + ')')) elif curName.endswith(' (' + curCountry + ')'): newShowNames.append(curName.replace(' (' + curCountry + ')', ' (' + country_list[curCountry] + ')')) showNames += newShowNames # at this 
point we could have duplicates due to case-ing, prune dupes return uniqify(showNames, lambda x: x.lower())
9,146
Python
.py
196
38.244898
158
0.636209
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,323
classes.py
midgetspy_Sick-Beard/sickbeard/classes.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard import datetime from common import Quality class SearchResult: """ Represents a search result from an indexer. """ def __init__(self, episodes): self.provider = -1 # URL to the NZB/torrent file self.url = "" # used by some providers to store extra info associated with the result self.extraInfo = [] # list of TVEpisode objects that this result is associated with self.episodes = episodes # quality of the release self.quality = Quality.UNKNOWN # release name self.name = "" def __str__(self): if self.provider == None: return "Invalid provider, unable to print self" myString = self.provider.name + " @ " + self.url + "\n" myString += "Extra Info:\n" for extra in self.extraInfo: myString += " " + extra + "\n" return myString def fileName(self): return self.episodes[0].prettyName() + "." + self.resultType class NZBSearchResult(SearchResult): """ Regular NZB result with an URL to the NZB """ resultType = "nzb" class NZBDataSearchResult(SearchResult): """ NZB result where the actual NZB XML data is stored in the extraInfo """ resultType = "nzbdata" class TorrentSearchResult(SearchResult): """ Torrent result with an URL to the torrent """ resultType = "torrent" class ShowListUI: """ This class is for tvdb-api. 
Instead of prompting with a UI to pick the desired result out of a list of shows it tries to be smart about it based on what shows are in SB. """ def __init__(self, config, log=None): self.config = config self.log = log def selectSeries(self, allSeries): idList = [x.tvdbid for x in sickbeard.showList] # try to pick a show that's in my show list for curShow in allSeries: if int(curShow['id']) in idList: return curShow # if nothing matches then just go with the first match I guess return allSeries[0] class Proper: def __init__(self, name, url, date): self.name = name self.url = url self.date = date self.provider = None self.quality = Quality.UNKNOWN self.tvdbid = -1 self.season = -1 self.episode = -1 def __str__(self): return str(self.date)+" "+self.name+" "+str(self.season)+"x"+str(self.episode)+" of "+str(self.tvdbid) class ErrorViewer(): """ Keeps a static list of UIErrors to be displayed on the UI and allows the list to be cleared. """ errors = [] def __init__(self): ErrorViewer.errors = [] @staticmethod def add(error): ErrorViewer.errors.append(error) @staticmethod def clear(): ErrorViewer.errors = [] class UIError(): """ Represents an error to be displayed in the web UI. """ def __init__(self, message): self.message = message self.time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
3,997
Python
.py
111
28.738739
111
0.62899
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,324
logger.py
midgetspy_Sick-Beard/sickbeard/logger.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import sys import threading import logging import sickbeard from sickbeard import classes # number of log files to keep NUM_LOGS = 3 # log size in bytes LOG_SIZE = 10000000 # 10 megs ERROR = logging.ERROR WARNING = logging.WARNING MESSAGE = logging.INFO DEBUG = logging.DEBUG reverseNames = {u'ERROR': ERROR, u'WARNING': WARNING, u'INFO': MESSAGE, u'DEBUG': DEBUG} class SBRotatingLogHandler(object): def __init__(self, log_file, num_files, num_bytes): self.num_files = num_files self.num_bytes = num_bytes self.log_file = log_file self.log_file_path = log_file self.cur_handler = None self.writes_since_check = 0 self.log_lock = threading.Lock() self.console_logging = False def close_log(self, handler=None): if not handler: handler = self.cur_handler if handler: sb_logger = logging.getLogger('sickbeard') sb_logger.removeHandler(handler) handler.flush() handler.close() def initLogging(self, consoleLogging=False): if consoleLogging: self.console_logging = consoleLogging old_handler = None # get old handler in case we want to close it if self.cur_handler: old_handler = self.cur_handler else: # only start consoleLogging on first initialize if self.console_logging: # define a Handler which writes 
INFO messages or higher to the sys.stderr console = logging.StreamHandler() console.setLevel(logging.INFO) # set a format which is simpler for console use console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S')) # add the handler to the root logger logging.getLogger('sickbeard').addHandler(console) self.log_file_path = os.path.join(sickbeard.LOG_DIR, self.log_file) self.cur_handler = self._config_handler() logging.getLogger('sickbeard').addHandler(self.cur_handler) logging.getLogger('sickbeard').setLevel(logging.DEBUG) # already logging in new log folder, close the old handler if old_handler: self.close_log(old_handler) def _config_handler(self): """ Configure a file handler to log at file_name and return it. """ file_handler = logging.FileHandler(self.log_file_path, encoding='utf-8') file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S')) return file_handler def _log_file_name(self, i): """ Returns a numbered log file name depending on i. If i==0 it just uses logName, if not it appends it to the extension (blah.log.3 for i == 3) i: Log number to ues """ return self.log_file_path + ('.' + str(i) if i else '') def _num_logs(self): """ Scans the log folder and figures out how many log files there are already on disk Returns: The number of the last used file (eg. mylog.log.3 would return 3). 
If there are no logs it returns -1 """ cur_log = 0 while os.path.isfile(self._log_file_name(cur_log)): cur_log += 1 return cur_log - 1 def _rotate_logs(self): sb_logger = logging.getLogger('sickbeard') # delete the old handler if self.cur_handler: self.close_log() # rename or delete all the old log files for i in range(self._num_logs(), -1, -1): cur_file_name = self._log_file_name(i) try: if i >= NUM_LOGS: os.remove(cur_file_name) else: os.rename(cur_file_name, self._log_file_name(i + 1)) except OSError: pass # the new log handler will always be on the un-numbered .log file new_file_handler = self._config_handler() self.cur_handler = new_file_handler sb_logger.addHandler(new_file_handler) def log(self, toLog, logLevel=MESSAGE): with self.log_lock: # check the size and see if we need to rotate if self.writes_since_check >= 10: if os.path.isfile(self.log_file_path) and os.path.getsize(self.log_file_path) >= LOG_SIZE: self._rotate_logs() self.writes_since_check = 0 else: self.writes_since_check += 1 meThread = threading.currentThread().getName() message = meThread + u" :: " + toLog out_line = message sb_logger = logging.getLogger('sickbeard') try: if logLevel == DEBUG: sb_logger.debug(out_line) elif logLevel == MESSAGE: sb_logger.info(out_line) elif logLevel == WARNING: sb_logger.warning(out_line) elif logLevel == ERROR: sb_logger.error(out_line) # add errors to the UI logger classes.ErrorViewer.add(classes.UIError(message)) else: sb_logger.log(logLevel, out_line) except ValueError: pass def log_error_and_exit(self, error_msg): log(error_msg, ERROR) if not self.console_logging: sys.exit(error_msg.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace')) else: sys.exit(1) sb_log_instance = SBRotatingLogHandler('sickbeard.log', NUM_LOGS, LOG_SIZE) def close(): sb_log_instance.close_log() def log_error_and_exit(error_msg): sb_log_instance.log_error_and_exit(error_msg) def log(toLog, logLevel=MESSAGE): sb_log_instance.log(toLog, logLevel)
7,019
Python
.py
162
32.32716
119
0.597805
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,325
webserveInit.py
midgetspy_Sick-Beard/sickbeard/webserveInit.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import cherrypy.lib.auth_basic import os.path import sickbeard from sickbeard import logger from sickbeard.webserve import WebInterface from sickbeard.helpers import create_https_certificates def initWebServer(options={}): options.setdefault('port', 8081) options.setdefault('host', '0.0.0.0') options.setdefault('log_dir', None) options.setdefault('username', '') options.setdefault('password', '') options.setdefault('web_root', '/') assert isinstance(options['port'], int) assert 'data_root' in options def http_error_401_hander(status, message, traceback, version): """ Custom handler for 401 error """ if status != "401 Unauthorized": logger.log(u"CherryPy caught an error: %s %s" % (status, message), logger.ERROR) logger.log(traceback, logger.DEBUG) return r'''<!DOCTYPE html> <html> <head> <title>%s</title> </head> <body> <br/> <font color="#0000FF">Error %s: You need to provide a valid username and password.</font> </body> </html> ''' % ('Access denied', status) def http_error_404_hander(status, message, traceback, version): """ Custom handler for 404 error, redirect back to main page """ return r'''<!DOCTYPE html> <html> <head> <title>404</title> <script> <!-- location.href = "%s/home/" //--> </script> </head> <body> <br/> </body> 
</html> ''' % options['web_root'] # cherrypy setup enable_https = options['enable_https'] https_cert = options['https_cert'] https_key = options['https_key'] if enable_https: # If either the HTTPS certificate or key do not exist, make some self-signed ones. if not (https_cert and os.path.exists(https_cert)) or not (https_key and os.path.exists(https_key)): if not create_https_certificates(https_cert, https_key): logger.log(u"Unable to create CERT/KEY files, disabling HTTPS") sickbeard.ENABLE_HTTPS = False enable_https = False if not (os.path.exists(https_cert) and os.path.exists(https_key)): logger.log(u"Disabled HTTPS because of missing CERT and KEY files", logger.WARNING) sickbeard.ENABLE_HTTPS = False enable_https = False mime_gzip = ('text/html', 'text/plain', 'text/css', 'text/javascript', 'application/javascript', 'text/x-javascript', 'application/x-javascript', 'text/x-json', 'application/json' ) options_dict = { 'server.socket_port': options['port'], 'server.socket_host': options['host'], 'log.screen': False, 'engine.autoreload.on': False, 'engine.autoreload.frequency': 100, 'engine.reexec_retry': 100, 'tools.gzip.on': True, 'tools.gzip.mime_types': mime_gzip, 'error_page.401': http_error_401_hander, 'error_page.404': http_error_404_hander, 'tools.autoproxy.on': True, } if enable_https: options_dict['server.ssl_certificate'] = https_cert options_dict['server.ssl_private_key'] = https_key protocol = "https" else: protocol = "http" logger.log(u"Starting Sick Beard on " + protocol + "://" + str(options['host']) + ":" + str(options['port']) + "/") cherrypy.config.update(options_dict) # setup cherrypy logging if options['log_dir'] and os.path.isdir(options['log_dir']): cherrypy.config.update({ 'log.access_file': os.path.join(options['log_dir'], "cherrypy.log") }) logger.log(u'Using %s for cherrypy log' % cherrypy.config['log.access_file']) conf = { '/': { 'tools.staticdir.root': options['data_root'], 'tools.encode.on': True, 'tools.encode.encoding': 'utf-8', }, 
'/images': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'images' }, '/js': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'js' }, '/css': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'css' }, } app = cherrypy.tree.mount(WebInterface(), options['web_root'], conf) # auth if options['username'] != "" and options['password'] != "": checkpassword = cherrypy.lib.auth_basic.checkpassword_dict({options['username']: options['password']}) app.merge({ '/': { 'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'SickBeard', 'tools.auth_basic.checkpassword': checkpassword }, '/api': { 'tools.auth_basic.on': False }, '/api/builder': { 'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'SickBeard', 'tools.auth_basic.checkpassword': checkpassword } }) # Ensure that when behind a mod_rewrite Apache reverse proxy, # both direct requests and proxied requests are handled properly. def autoproxy( base = None, local = 'X-Forwarded-Host', remote = 'X-Forwarded-For', scheme = 'X-Forwarded-Proto', debug = False, ): """ Apply the CherryPy proxy tool only if the ``local`` header is set. Notice that it maps the parameters to the original proxy tool. Use it as per the usual proxy tool: tools.autoproxy.on: True tools.autoproxy.base: "http://www.mydomain.com" """ # or to look for all of them # h = cherrypy.serving.request.headers # if local in h and remote in h and scheme in h: if local in cherrypy.serving.request.headers: cherrypy.lib.cptools.proxy(base, local, remote, scheme, debug) cherrypy.tools.autoproxy = cherrypy.Tool( 'before_request_body', autoproxy, priority = 30, ) cherrypy.server.start() cherrypy.server.wait()
7,165
Python
.py
181
30.209945
120
0.585039
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,326
searchBacklog.py
midgetspy_Sick-Beard/sickbeard/searchBacklog.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import datetime import threading import sickbeard from sickbeard import db, scheduler from sickbeard import search_queue from sickbeard import logger from sickbeard import ui class BacklogSearchScheduler(scheduler.Scheduler): def forceSearch(self): self.action._set_lastBacklog(1) self.lastRun = datetime.datetime.fromordinal(1) def nextRun(self): if self.action._lastBacklog <= 1: return datetime.date.today() else: return datetime.date.fromordinal(self.action._lastBacklog + self.action.cycleTime) class BacklogSearcher: def __init__(self): self._lastBacklog = self._get_lastBacklog() self.cycleTime = 7 self.lock = threading.Lock() self.amActive = False self.amPaused = False self.amWaiting = False self._resetPI() def _resetPI(self): self.percentDone = 0 self.currentSearchInfo = {'title': 'Initializing'} def getProgressIndicator(self): if self.amActive: return ui.ProgressIndicator(self.percentDone, self.currentSearchInfo) else: return None def am_running(self): logger.log(u"amWaiting: " + str(self.amWaiting) + ", amActive: " + str(self.amActive), logger.DEBUG) return (not self.amWaiting) and self.amActive def searchBacklog(self, which_shows=None): if which_shows: show_list = which_shows else: 
show_list = sickbeard.showList def titler(x): if not x: return x if not x.lower().startswith('a to ') and x.lower().startswith('a '): x = x[2:] elif x.lower().startswith('an '): x = x[3:] elif x.lower().startswith('the '): x = x[4:] return x # sort shows the same way we show them, makes it easier to follow along show_list = sorted(show_list, lambda x, y: cmp(titler(x.name), titler(y.name))) if self.amActive is True: logger.log(u"Backlog is still running, not starting it again", logger.DEBUG) return self._get_lastBacklog() curDate = datetime.date.today().toordinal() fromDate = datetime.date.fromordinal(1) if not which_shows and not curDate - self._lastBacklog >= self.cycleTime: logger.log(u"Running limited backlog on recently missed episodes only") fromDate = datetime.date.today() - datetime.timedelta(days=7) self.amActive = True self.amPaused = False #myDB = db.DBConnection() #numSeasonResults = myDB.select("SELECT DISTINCT(season), showid FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.tvdb_id AND show.paused = 0 AND ep.airdate > ?", [fromDate.toordinal()]) # get separate lists of the season/date shows #season_shows = [x for x in show_list if not x.air_by_date] air_by_date_shows = [x for x in show_list if x.air_by_date] # figure out how many segments of air by date shows we're going to do air_by_date_segments = [] for cur_id in [x.tvdbid for x in air_by_date_shows]: air_by_date_segments += self._get_air_by_date_segments(cur_id, fromDate) logger.log(u"Air-by-date segments: " + str(air_by_date_segments), logger.DEBUG) #totalSeasons = float(len(numSeasonResults) + len(air_by_date_segments)) #numSeasonsDone = 0.0 # go through non air-by-date shows and see if they need any episodes for curShow in show_list: if curShow.paused: continue if curShow.air_by_date: segments = [x[1] for x in self._get_air_by_date_segments(curShow.tvdbid, fromDate)] else: segments = self._get_season_segments(curShow.tvdbid, fromDate) for cur_segment in segments: 
self.currentSearchInfo = {'title': curShow.name + " Season " + str(cur_segment)} backlog_queue_item = search_queue.BacklogQueueItem(curShow, cur_segment) if not backlog_queue_item.wantSeason: logger.log(u"Nothing in season " + str(cur_segment) + " needs to be downloaded, skipping this season", logger.DEBUG) else: sickbeard.searchQueueScheduler.action.add_item(backlog_queue_item) # @UndefinedVariable # don't consider this an actual backlog search if we only did recent eps # or if we only did certain shows if fromDate == datetime.date.fromordinal(1) and not which_shows: self._set_lastBacklog(curDate) self.amActive = False self._resetPI() def _get_lastBacklog(self): logger.log(u"Retrieving the last check time from the DB", logger.DEBUG) myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM info") if len(sqlResults) == 0: lastBacklog = 1 elif sqlResults[0]["last_backlog"] is None or sqlResults[0]["last_backlog"] == "": lastBacklog = 1 else: lastBacklog = int(sqlResults[0]["last_backlog"]) if lastBacklog > datetime.date.today().toordinal(): lastBacklog = 1 self._lastBacklog = lastBacklog return self._lastBacklog def _get_season_segments(self, tvdb_id, fromDate): myDB = db.DBConnection() sqlResults = myDB.select("SELECT DISTINCT(season) as season FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?", [tvdb_id, fromDate.toordinal()]) return [int(x["season"]) for x in sqlResults] def _get_air_by_date_segments(self, tvdb_id, fromDate): # query the DB for all dates for this show myDB = db.DBConnection() num_air_by_date_results = myDB.select("SELECT airdate, showid FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.tvdb_id AND show.paused = 0 ANd ep.airdate > ? 
AND ep.showid = ?", [fromDate.toordinal(), tvdb_id]) # break them apart into month/year strings air_by_date_segments = [] for cur_result in num_air_by_date_results: cur_date = datetime.date.fromordinal(int(cur_result["airdate"])) cur_date_str = str(cur_date)[:7] cur_tvdb_id = int(cur_result["showid"]) cur_result_tuple = (cur_tvdb_id, cur_date_str) if cur_result_tuple not in air_by_date_segments: air_by_date_segments.append(cur_result_tuple) return air_by_date_segments def _set_lastBacklog(self, when): logger.log(u"Setting the last backlog in the DB to " + str(when), logger.DEBUG) myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM info") if len(sqlResults) == 0: myDB.action("INSERT INTO info (last_backlog, last_TVDB) VALUES (?,?)", [str(when), 0]) else: myDB.action("UPDATE info SET last_backlog=" + str(when)) def run(self): try: self.searchBacklog() except: self.amActive = False raise
8,227
Python
.py
162
39.876543
220
0.621309
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,327
webserve.py
midgetspy_Sick-Beard/sickbeard/webserve.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os.path import time import urllib import re import threading import datetime import random from Cheetah.Template import Template import cherrypy.lib import sickbeard from sickbeard import config, sab from sickbeard import history, notifiers, processTV from sickbeard import ui from sickbeard import logger, helpers, exceptions, classes, db from sickbeard import encodingKludge as ek from sickbeard import search_queue from sickbeard import image_cache from sickbeard import naming from sickbeard.providers import newznab from sickbeard.common import Quality, Overview, statusStrings from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED from sickbeard.exceptions import ex from sickbeard.webapi import Api from lib.tvdb_api import tvdb_api, tvdb_exceptions try: import json except ImportError: from lib import simplejson as json try: import xml.etree.cElementTree as etree except ImportError: import xml.etree.ElementTree as etree from sickbeard import browser class PageTemplate (Template): def __init__(self, *args, **KWs): KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file']) super(PageTemplate, self).__init__(*args, **KWs) self.sbRoot = 
sickbeard.WEB_ROOT self.sbHttpPort = sickbeard.WEB_PORT self.sbHttpsPort = sickbeard.WEB_PORT self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS if cherrypy.request.headers['Host'][0] == '[': self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0) else: self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0) self.projectHomePage = "http://code.google.com/p/sickbeard/" if "X-Forwarded-Host" in cherrypy.request.headers: self.sbHost = cherrypy.request.headers['X-Forwarded-Host'] if "X-Forwarded-Port" in cherrypy.request.headers: self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port'] self.sbHttpsPort = self.sbHttpPort if "X-Forwarded-Proto" in cherrypy.request.headers: self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False logPageTitle = 'Logs &amp; Errors' if len(classes.ErrorViewer.errors): logPageTitle += ' (' + str(len(classes.ErrorViewer.errors)) + ')' self.logPageTitle = logPageTitle self.sbPID = str(sickbeard.PID) self.menu = [ { 'title': 'Home', 'key': 'home' }, { 'title': 'Coming Episodes', 'key': 'comingEpisodes' }, { 'title': 'History', 'key': 'history' }, { 'title': 'Manage', 'key': 'manage' }, { 'title': 'Config', 'key': 'config' }, { 'title': logPageTitle, 'key': 'errorlogs' }, ] def redirect(abspath, *args, **KWs): assert abspath[0] == '/' raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs) class TVDBWebUI: def __init__(self, config, log=None): self.config = config self.log = log def selectSeries(self, allSeries): searchList = ",".join([x['id'] for x in allSeries]) showDirList = "" for curShowDir in self.config['_showDir']: showDirList += "showDir=" + curShowDir + "&" redirect("/home/addShows/addShow?" 
+ showDirList + "seriesList=" + searchList) def _munge(string): return unicode(string).encode('utf-8', 'xmlcharrefreplace') def _genericMessage(subject, message): t = PageTemplate(file="genericMessage.tmpl") t.submenu = HomeMenu() t.subject = subject t.message = message return _munge(t) def _getEpisode(show, season, episode): if show is None or season is None or episode is None: return "Invalid parameters" showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return "Show not in show list" epObj = showObj.getEpisode(int(season), int(episode)) if epObj is None: return "Episode couldn't be retrieved" return epObj ManageMenu = [ { 'title': 'Backlog Overview', 'path': 'manage/backlogOverview/' }, { 'title': 'Manage Searches', 'path': 'manage/manageSearches/' }, { 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses/' }, ] class ManageSearches: @cherrypy.expose def index(self): t = PageTemplate(file="manage_manageSearches.tmpl") #t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator() t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() # @UndefinedVariable t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() # @UndefinedVariable t.searchStatus = sickbeard.currentSearchScheduler.action.amActive # @UndefinedVariable t.submenu = ManageMenu return _munge(t) @cherrypy.expose def forceSearch(self): # force it to run the next time it looks result = sickbeard.currentSearchScheduler.forceRun() if result: logger.log(u"Search forced") ui.notifications.message('Episode search started', 'Note: RSS feeds may not be updated if retrieved recently') redirect("/manage/manageSearches/") @cherrypy.expose def pauseBacklog(self, paused=None): if paused == "1": sickbeard.searchQueueScheduler.action.pause_backlog() # @UndefinedVariable else: sickbeard.searchQueueScheduler.action.unpause_backlog() # @UndefinedVariable redirect("/manage/manageSearches/") 
@cherrypy.expose def forceVersionCheck(self): # force a check to see if there is a new version result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) # @UndefinedVariable if result: logger.log(u"Forcing version check") redirect("/manage/manageSearches/") class Manage: manageSearches = ManageSearches() @cherrypy.expose def index(self): t = PageTemplate(file="manage.tmpl") t.submenu = ManageMenu return _munge(t) @cherrypy.expose def showEpisodeStatuses(self, tvdb_id, whichStatus, includeSpecials, excludeNoAirdate): myDB = db.DBConnection() status_list = [int(whichStatus)] if status_list[0] == SNATCHED: status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER sqlStatement = "SELECT season, episode, name FROM tv_episodes WHERE showid = ?" if includeSpecials == "False": sqlStatement += " AND season > 0" if excludeNoAirdate == "True": sqlStatement += " AND airdate > 1" cur_show_results = myDB.select(sqlStatement + " AND status IN (" + ','.join(['?'] * len(status_list)) + ")", [int(tvdb_id)] + status_list) result = {} for cur_result in cur_show_results: cur_season = int(cur_result["season"]) cur_episode = int(cur_result["episode"]) if cur_season not in result: result[cur_season] = {} result[cur_season][cur_episode] = cur_result["name"] return json.dumps(result) @cherrypy.expose def episodeStatuses(self, whichStatus=None, includeSpecials=False, excludeNoAirdate=False): if whichStatus: whichStatus = int(whichStatus) status_list = [whichStatus] if status_list[0] == SNATCHED: status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER else: status_list = [] t = PageTemplate(file="manage_episodeStatuses.tmpl") t.submenu = ManageMenu t.whichStatus = whichStatus t.includeSpecials = includeSpecials t.excludeNoAirdate = excludeNoAirdate # if we have no status then this is as far as we need to go if not status_list: return _munge(t) sqlStatement = "" if not includeSpecials: sqlStatement += " AND season > 0" if excludeNoAirdate: sqlStatement += " AND airdate 
> 1" myDB = db.DBConnection() status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN (" + ','.join(['?'] * len(status_list)) + ")" + sqlStatement + " AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name", status_list) ep_counts = {} show_names = {} sorted_show_ids = [] for cur_status_result in status_results: cur_tvdb_id = int(cur_status_result["tvdb_id"]) if cur_tvdb_id not in ep_counts: ep_counts[cur_tvdb_id] = 1 else: ep_counts[cur_tvdb_id] += 1 show_names[cur_tvdb_id] = cur_status_result["show_name"] if cur_tvdb_id not in sorted_show_ids: sorted_show_ids.append(cur_tvdb_id) t.show_names = show_names t.ep_counts = ep_counts t.sorted_show_ids = sorted_show_ids return _munge(t) @cherrypy.expose def changeEpisodeStatuses(self, oldStatus, newStatus, opt_includeSpecials, opt_excludeNoAirdate, *args, **kwargs): status_list = [int(oldStatus)] if status_list[0] == SNATCHED: status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER to_change = {} # make a list of all shows and their associated args for arg in kwargs: tvdb_id, what = arg.split('-') # we don't care about unchecked checkboxes if kwargs[arg] != 'on': continue if tvdb_id not in to_change: to_change[tvdb_id] = [] to_change[tvdb_id].append(what) sqlStatement = "" if opt_includeSpecials == "False": sqlStatement += " AND season > 0" if opt_excludeNoAirdate == "True": sqlStatement += " AND airdate > 1" myDB = db.DBConnection() for cur_tvdb_id in to_change: # get a list of all the eps we want to change if they just said "all" if 'all' in to_change[cur_tvdb_id]: all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status IN (" + ','.join(['?'] * len(status_list)) + ")" + sqlStatement + " AND showid = ?", status_list + [cur_tvdb_id]) all_eps = [str(x["season"]) + 'x' + str(x["episode"]) for x in all_eps_results] to_change[cur_tvdb_id] = all_eps Home().setStatus(cur_tvdb_id, '|'.join(to_change[cur_tvdb_id]), 
newStatus, direct=True) redirect("/manage/episodeStatuses/") @cherrypy.expose def backlogShow(self, tvdb_id): show_obj = helpers.findCertainShow(sickbeard.showList, int(tvdb_id)) if show_obj: sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) # @UndefinedVariable ui.notifications.message('Forced Backlog', 'Running limited backlog for ' + str(show_obj.name)) redirect("/manage/backlogOverview/") @cherrypy.expose def backlogOverview(self): t = PageTemplate(file="manage_backlogOverview.tmpl") t.submenu = ManageMenu myDB = db.DBConnection() showCounts = {} showCats = {} showSQLResults = {} for curShow in sickbeard.showList: epCounts = {} epCats = {} epCounts[Overview.SKIPPED] = 0 epCounts[Overview.WANTED] = 0 epCounts[Overview.QUAL] = 0 epCounts[Overview.GOOD] = 0 epCounts[Overview.UNAIRED] = 0 epCounts[Overview.SNATCHED] = 0 sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid]) for curResult in sqlResults: curEpCat = curShow.getOverview(int(curResult["status"])) epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat epCounts[curEpCat] += 1 showCounts[curShow.tvdbid] = epCounts showCats[curShow.tvdbid] = epCats showSQLResults[curShow.tvdbid] = sqlResults t.showCounts = showCounts t.showCats = showCats t.showSQLResults = showSQLResults return _munge(t) @cherrypy.expose def massEdit(self, toEdit=None): t = PageTemplate(file="manage_massEdit.tmpl") t.submenu = ManageMenu if not toEdit: redirect("/manage/") showIDs = toEdit.split("|") showList = [] for curID in showIDs: curID = int(curID) showObj = helpers.findCertainShow(sickbeard.showList, curID) if showObj: showList.append(showObj) flatten_folders_all_same = True last_flatten_folders = None paused_all_same = True last_paused = None skip_notices_all_same = True last_skip_notices = None quality_all_same = True last_quality = None root_dir_list = [] for curShow in showList: cur_root_dir = ek.ek(os.path.dirname, 
curShow._location)
            if cur_root_dir not in root_dir_list:
                root_dir_list.append(cur_root_dir)

            # if we know they're not all the same then no point even bothering
            if paused_all_same:
                # if we had a value already and this value is different then they're not all the same
                if last_paused not in (curShow.paused, None):
                    paused_all_same = False
                else:
                    last_paused = curShow.paused

            if skip_notices_all_same:
                # if we had a value already and this value is different then they're not all the same
                if last_skip_notices not in (curShow.skip_notices, None):
                    skip_notices_all_same = False
                else:
                    last_skip_notices = curShow.skip_notices

            if flatten_folders_all_same:
                if last_flatten_folders not in (None, curShow.flatten_folders):
                    flatten_folders_all_same = False
                else:
                    last_flatten_folders = curShow.flatten_folders

            if quality_all_same:
                if last_quality not in (None, curShow.quality):
                    quality_all_same = False
                else:
                    last_quality = curShow.quality

        # hand the shared values to the template; None means "the shows differ on this setting"
        t.showList = toEdit
        t.paused_value = last_paused if paused_all_same else None
        t.skip_notices_value = last_skip_notices if skip_notices_all_same else None
        t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
        t.quality_value = last_quality if quality_all_same else None
        t.root_dir_list = root_dir_list

        return _munge(t)

    @cherrypy.expose
    def massEditSubmit(self, paused=None, skip_notices=None, flatten_folders=None, quality_preset=False,
                       anyQualities=[], bestQualities=[], toEdit=None, *args, **kwargs):
        """
        Apply a mass-edit form submission to every show in the |-separated toEdit list.

        paused/skip_notices/flatten_folders take 'keep'/'enable'/'disable';
        quality_preset == 'keep' preserves each show's current quality split.
        Root-dir moves come in as paired orig_root_dir_N / new_root_dir_N kwargs.

        NOTE(review): anyQualities/bestQualities are mutable default arguments;
        they appear to only be rebound (never mutated) here, but confirm before
        relying on that.
        """
        # map each original root dir onto its replacement, from the paired form fields
        dir_map = {}
        for cur_arg in kwargs:
            if not cur_arg.startswith('orig_root_dir_'):
                continue
            which_index = cur_arg.replace('orig_root_dir_', '')
            end_dir = kwargs['new_root_dir_' + which_index]
            dir_map[kwargs[cur_arg]] = end_dir

        showIDs = toEdit.split("|")
        errors = []
        for curShow in showIDs:
            curErrors = []
            showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
            if not showObj:
                continue

            cur_root_dir = ek.ek(os.path.dirname, showObj._location)
            cur_show_dir = ek.ek(os.path.basename, showObj._location)
            # relocate the show only when its root dir is being remapped to a new one
            if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
                new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
                logger.log(u"For show " + showObj.name + " changing dir from " + showObj._location + " to " + new_show_dir)
            else:
                new_show_dir = showObj._location

            # 'keep' leaves the show's current value; anything else becomes on/off for editShow
            if paused == 'keep':
                new_paused = showObj.paused
            else:
                new_paused = True if paused == 'enable' else False
            new_paused = 'on' if new_paused else 'off'

            if skip_notices == 'keep':
                new_skip_notices = showObj.skip_notices
            else:
                new_skip_notices = True if skip_notices == 'enable' else False
            new_skip_notices = 'on' if new_skip_notices else 'off'

            if flatten_folders == 'keep':
                new_flatten_folders = showObj.flatten_folders
            else:
                new_flatten_folders = True if flatten_folders == 'enable' else False
            new_flatten_folders = 'on' if new_flatten_folders else 'off'

            if quality_preset == 'keep':
                anyQualities, bestQualities = Quality.splitQuality(showObj.quality)

            curErrors += Home().editShow(curShow, new_show_dir, anyQualities, bestQualities, new_flatten_folders, new_paused, new_skip_notices, directCall=True)

            if curErrors:
                logger.log(u"Errors: " + str(curErrors), logger.ERROR)
                errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(['<li>%s</li>' % error for error in curErrors]) + "</ul>")

        if len(errors) > 0:
            ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"), " ".join(errors))

        redirect("/manage/")

    @cherrypy.expose
    def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toMetadata=None):
        """
        Queue update/refresh/rename/delete actions for the given |-separated
        lists of show ids.  toMetadata is accepted but only used to widen the
        set of ids iterated below.
        """
        if toUpdate is not None:
            toUpdate = toUpdate.split('|')
        else:
            toUpdate = []

        if toRefresh is not None:
            toRefresh = toRefresh.split('|')
        else:
            toRefresh = []

        if toRename is not None:
            toRename = toRename.split('|')
        else:
            toRename = []

        if toDelete is not None:
            toDelete = toDelete.split('|')
        else:
            toDelete = []

        if toMetadata is not None:
            toMetadata = toMetadata.split('|')
        else:
            toMetadata = []

        errors = []
        refreshes = []
        updates = []
        renames = []

        for curShowID in 
set(toUpdate + toRefresh + toRename + toDelete + toMetadata):

            if curShowID == '':
                continue

            showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))

            if showObj is None:
                continue

            if curShowID in toDelete:
                showObj.deleteShow()
                # don't do anything else if it's being deleted
                continue

            if curShowID in toUpdate:
                try:
                    sickbeard.showQueueScheduler.action.updateShow(showObj, True)  # @UndefinedVariable
                    updates.append(showObj.name)
                except exceptions.CantUpdateException, e:
                    errors.append("Unable to update show " + showObj.name + ": " + ex(e))

            # don't bother refreshing shows that were updated anyway
            if curShowID in toRefresh and curShowID not in toUpdate:
                try:
                    sickbeard.showQueueScheduler.action.refreshShow(showObj)  # @UndefinedVariable
                    refreshes.append(showObj.name)
                except exceptions.CantRefreshException, e:
                    errors.append("Unable to refresh show " + showObj.name + ": " + ex(e))

            if curShowID in toRename:
                sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj)  # @UndefinedVariable
                renames.append(showObj.name)

        if len(errors) > 0:
            ui.notifications.error("Errors encountered", '<br >\n'.join(errors))

        # build an HTML summary of everything that got queued
        messageDetail = ""

        if len(updates) > 0:
            messageDetail += "<br /><b>Updates</b><br /><ul><li>"
            messageDetail += "</li><li>".join(updates)
            messageDetail += "</li></ul>"

        if len(refreshes) > 0:
            messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
            messageDetail += "</li><li>".join(refreshes)
            messageDetail += "</li></ul>"

        if len(renames) > 0:
            messageDetail += "<br /><b>Renames</b><br /><ul><li>"
            messageDetail += "</li><li>".join(renames)
            messageDetail += "</li></ul>"

        if len(updates + refreshes + renames) > 0:
            ui.notifications.message("The following actions were queued:", messageDetail)

        redirect("/manage/")

class History:
    """Web handler for the download-history pages."""

    @cherrypy.expose
    def index(self, limit=100):
        """Render the history page; limit == "0" shows everything."""
        myDB = db.DBConnection()

        if limit == "0":
            sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC")
        else:
            sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC LIMIT ?", [limit])

        t = PageTemplate(file="history.tmpl")
        t.historyResults = sqlResults
        t.limit = limit
        t.submenu = [
            { 'title': 'Clear History', 'path': 'history/clearHistory/' },
            { 'title': 'Trim History', 'path': 'history/trimHistory/' },
        ]

        return _munge(t)

    @cherrypy.expose
    def clearHistory(self):
        """Delete every history row, then VACUUM the database."""
        myDB = db.DBConnection()
        myDB.action("DELETE FROM history WHERE 1=1")
        myDB.action("VACUUM")
        ui.notifications.message('History cleared')
        redirect("/history/")

    @cherrypy.expose
    def trimHistory(self):
        """Delete history rows older than 30 days, then VACUUM the database."""
        myDB = db.DBConnection()
        myDB.action("DELETE FROM history WHERE date < " + str((datetime.datetime.today() - datetime.timedelta(days=30)).strftime(history.dateFormat)))
        myDB.action("VACUUM")
        ui.notifications.message('Removed history entries greater than 30 days old')
        redirect("/history/")

# submenu shared by all of the /config/* pages
ConfigMenu = [
    { 'title': 'General', 'path': 'config/general/' },
    { 'title': 'Search Settings', 'path': 'config/search/' },
    { 'title': 'Search Providers', 'path': 'config/providers/' },
    { 'title': 'Post Processing', 'path': 'config/postProcessing/' },
    { 'title': 'Notifications', 'path': 'config/notifications/' },
]

class ConfigGeneral:
    """Web handler for the General configuration page."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="config_general.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    @cherrypy.expose
    def saveRootDirs(self, rootDirString=None):
        """Persist the raw root-dirs string (in-memory only; no save_config here)."""
        sickbeard.ROOT_DIRS = rootDirString

    @cherrypy.expose
    def saveAddShowDefaults(self, defaultFlattenFolders, defaultStatus, anyQualities, bestQualities):
        """Store the defaults applied when new shows are added (comma-separated quality ids)."""
        if anyQualities:
            anyQualities = anyQualities.split(',')
        else:
            anyQualities = []

        if bestQualities:
            bestQualities = bestQualities.split(',')
        else:
            bestQualities = []

        newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))

        sickbeard.STATUS_DEFAULT = int(defaultStatus)
        sickbeard.QUALITY_DEFAULT = int(newQuality)
        sickbeard.FLATTEN_FOLDERS_DEFAULT = config.checkbox_to_value(defaultFlattenFolders)
    @cherrypy.expose
    def generateKey(self):
        """ Return a new randomized API_KEY """
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5

        # Create some values to seed md5
        t = str(time.time())
        r = str(random.random())

        # Create the md5 instance and give it the current time
        m = md5(t)

        # Update the md5 instance with the random variable
        m.update(r)

        # Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
        logger.log(u"New SB API key generated")
        return m.hexdigest()

    @cherrypy.expose
    def saveGeneral(self, log_dir=None, web_port=None, web_log=None, web_ipv6=None,
                    launch_browser=None, web_username=None, use_api=None, api_key=None,
                    web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None):
        """Persist the General config form; collects per-field errors and notifies the UI."""
        results = []

        # Misc
        sickbeard.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser)
        config.change_VERSION_NOTIFY(config.checkbox_to_value(version_notify))
        # sickbeard.LOG_DIR is set in config.change_LOG_DIR()

        # Web Interface
        sickbeard.WEB_IPV6 = config.checkbox_to_value(web_ipv6)
        # sickbeard.WEB_LOG is set in config.change_LOG_DIR()
        if not config.change_LOG_DIR(log_dir, web_log):
            results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log directory not changed."]
        sickbeard.WEB_PORT = config.to_int(web_port, default=8081)
        sickbeard.WEB_USERNAME = web_username
        sickbeard.WEB_PASSWORD = web_password

        sickbeard.ENABLE_HTTPS = config.checkbox_to_value(enable_https)
        if not config.change_HTTPS_CERT(https_cert):
            results += ["Unable to create directory " + os.path.normpath(https_cert) + ", https cert directory not changed."]
        if not config.change_HTTPS_KEY(https_key):
            results += ["Unable to create directory " + os.path.normpath(https_key) + ", https key directory not changed."]

        # API
        sickbeard.USE_API = config.checkbox_to_value(use_api)
        sickbeard.API_KEY = api_key

        sickbeard.save_config()

        if len(results) > 0:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results))
        else:
            ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))

        redirect("/config/general/")

class ConfigSearch:
    """Web handler for the Search Settings configuration page."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="config_search.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    @cherrypy.expose
    def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
                   sab_apikey=None, sab_category=None, sab_host=None, nzbget_username=None, nzbget_password=None,
                   nzbget_category=None, nzbget_host=None, torrent_dir=None, nzb_method=None, usenet_retention=None,
                   search_frequency=None, download_propers=None, ignore_words=None):
        """Persist the search/NZB/torrent settings; collects per-field errors and notifies the UI."""
        results = []

        # Episode Search
        sickbeard.DOWNLOAD_PROPERS = config.checkbox_to_value(download_propers)
        config.change_SEARCH_FREQUENCY(search_frequency)
        sickbeard.USENET_RETENTION = config.to_int(usenet_retention, default=500)
        sickbeard.IGNORE_WORDS = ignore_words

        # NZB Search
        sickbeard.USE_NZBS = config.checkbox_to_value(use_nzbs)
        sickbeard.NZB_METHOD = nzb_method
        sickbeard.SAB_HOST = config.clean_url(sab_host)
        sickbeard.SAB_USERNAME = sab_username
        sickbeard.SAB_PASSWORD = sab_password
        sickbeard.SAB_APIKEY = sab_apikey.strip()
        sickbeard.SAB_CATEGORY = sab_category
        if not config.change_NZB_DIR(nzb_dir):
            results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", directory not changed."]
        sickbeard.NZBGET_HOST = config.clean_url(nzbget_host)
        sickbeard.NZBGET_USERNAME = nzbget_username
        sickbeard.NZBGET_PASSWORD = nzbget_password
        sickbeard.NZBGET_CATEGORY = nzbget_category

        # Torrent Search
        sickbeard.USE_TORRENTS = config.checkbox_to_value(use_torrents)
        if not config.change_TORRENT_DIR(torrent_dir):
            results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", directory not changed."]

        sickbeard.save_config()

        if len(results) > 0:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results))
        else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))

        redirect("/config/search/")

class ConfigPostProcessing:
    """Web handler for the Post Processing configuration page."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="config_postProcessing.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    @cherrypy.expose
    def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None, xbmc_data=None, xbmc_12plus_data=None,
                           mediabrowser_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None, mede8er_data=None,
                           keep_processed_dir=None, process_automatically=None, rename_episodes=None,
                           move_associated_files=None, filter_associated_files=None, tv_download_dir=None,
                           naming_custom_abd=None, naming_abd_pattern=None):
        """Persist post-processing, naming and metadata-provider settings."""
        results = []

        # Post-Processing
        if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
            results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
        sickbeard.KEEP_PROCESSED_DIR = config.checkbox_to_value(keep_processed_dir)
        sickbeard.MOVE_ASSOCIATED_FILES = config.checkbox_to_value(move_associated_files)
        sickbeard.FILTER_ASSOCIATED_FILES = filter_associated_files
        sickbeard.RENAME_EPISODES = config.checkbox_to_value(rename_episodes)
        sickbeard.PROCESS_AUTOMATICALLY = config.checkbox_to_value(process_automatically)
        # keep the auto post-processor scheduler quiet unless auto-processing is on
        if sickbeard.PROCESS_AUTOMATICALLY:
            sickbeard.autoPostProcesserScheduler.silent = False
        else:
            sickbeard.autoPostProcesserScheduler.silent = True

        # Naming
        sickbeard.NAMING_CUSTOM_ABD = config.checkbox_to_value(naming_custom_abd)
        if self.isNamingValid(naming_pattern, naming_multi_ep) != "invalid":
            sickbeard.NAMING_PATTERN = naming_pattern
            sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
            sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
        else:
            results.append("You tried saving an invalid naming config, not saving your naming settings")
        if self.isNamingValid(naming_abd_pattern, None, True) != "invalid":
            sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
        elif naming_custom_abd:
            results.append("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")

        # Metadata
        sickbeard.METADATA_XBMC = xbmc_data
        sickbeard.METADATA_XBMC_12PLUS = xbmc_12plus_data
        sickbeard.METADATA_MEDIABROWSER = mediabrowser_data
        sickbeard.METADATA_PS3 = sony_ps3_data
        sickbeard.METADATA_WDTV = wdtv_data
        sickbeard.METADATA_TIVO = tivo_data
        sickbeard.METADATA_MEDE8ER = mede8er_data
        sickbeard.metadata_provider_dict['XBMC'].set_config(sickbeard.METADATA_XBMC)
        sickbeard.metadata_provider_dict['XBMC 12+'].set_config(sickbeard.METADATA_XBMC_12PLUS)
        sickbeard.metadata_provider_dict['MediaBrowser'].set_config(sickbeard.METADATA_MEDIABROWSER)
        sickbeard.metadata_provider_dict['Sony PS3'].set_config(sickbeard.METADATA_PS3)
        sickbeard.metadata_provider_dict['WDTV'].set_config(sickbeard.METADATA_WDTV)
        sickbeard.metadata_provider_dict['TIVO'].set_config(sickbeard.METADATA_TIVO)
        sickbeard.metadata_provider_dict['Mede8er'].set_config(sickbeard.METADATA_MEDE8ER)

        # Save changes
        sickbeard.save_config()

        if len(results) > 0:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results))
        else:
            ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))

        redirect("/config/postProcessing/")

    @cherrypy.expose
    def testNaming(self, pattern=None, multi=None, abd=False):
        """Render a sample file name/path for the given naming pattern."""
        if multi is not None:
            multi = int(multi)

        result = naming.test_name(pattern, multi, abd)
        result = ek.ek(os.path.join, result['dir'], result['name'])

        return result

    @cherrypy.expose
    def isNamingValid(self, pattern=None, multi=None, abd=False):
        """Return "valid", "seasonfolders" or "invalid" for a naming pattern."""
        if pattern is None:
            return "invalid"

        # air by date shows just need one check, we don't need to worry about season folders
        if abd:
            is_valid = naming.check_valid_abd_naming(pattern)
            require_season_folders = False
        else:
            # check validity of single and multi ep cases for the whole path
            is_valid = naming.check_valid_naming(pattern, multi)
            # check validity of single and multi ep cases for only the file name
            require_season_folders = naming.check_force_season_folders(pattern, multi)

        if is_valid and not require_season_folders:
            return "valid"
        elif is_valid and require_season_folders:
            return "seasonfolders"
        else:
            return "invalid"

class ConfigProviders:
    """Web handler for the Search Providers configuration page."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="config_providers.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    @cherrypy.expose
    def canAddNewznabProvider(self, name):
        """JSON check: can a newznab provider with this name be added (no id clash)?"""
        if not name:
            return json.dumps({'error': 'No Provider Name specified'})

        providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))

        tempProvider = newznab.NewznabProvider(name, '')

        if tempProvider.getID() in providerDict:
            return json.dumps({'error': 'Provider Name already exists as ' + providerDict[tempProvider.getID()].name})
        else:
            return json.dumps({'success': tempProvider.getID()})

    @cherrypy.expose
    def saveNewznabProvider(self, name, url, key=''):
        """Create or update a newznab provider; returns "<id>|<configStr>" or '0' on bad input."""
        if not name or not url:
            return '0'

        providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))

        if name in providerDict:
            # built-in (default) providers keep their name/url; only the key may change
            if not providerDict[name].default:
                providerDict[name].name = name
                providerDict[name].url = config.clean_url(url)

            providerDict[name].key = key
            # a 0 in the key spot indicates that no key is needed
            if key == '0':
                providerDict[name].needs_auth = False
            else:
                providerDict[name].needs_auth = True

            return providerDict[name].getID() + '|' + providerDict[name].configStr()

        else:
            newProvider = newznab.NewznabProvider(name, url, key=key)
            sickbeard.newznabProviderList.append(newProvider)
            return newProvider.getID() + '|' + newProvider.configStr()

    @cherrypy.expose
    def deleteNewznabProvider(self, nnid):
        """Remove a non-default newznab provider by id; returns '1' on success, '0' otherwise."""
        providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))

        if nnid not in providerDict or providerDict[nnid].default:
            return '0'

        # delete it from the list
        sickbeard.newznabProviderList.remove(providerDict[nnid])

        if nnid in sickbeard.PROVIDER_ORDER:
            sickbeard.PROVIDER_ORDER.remove(nnid)

        return '1'

    @cherrypy.expose
    def saveProviders(self, newznab_string='', omgwtfnzbs_username=None, omgwtfnzbs_apikey=None,
                      tvtorrents_digest=None, tvtorrents_hash=None, torrentleech_key=None, btn_api_key=None,
                      hdbits_username=None, hdbits_passkey=None, provider_order=None):
        """
        Persist the provider list: sync newznab providers from the !!!-separated
        newznab_string, apply the "name:enabled" pairs in provider_order, and
        store each provider's credentials.
        """
        results = []

        provider_str_list = provider_order.split()
        provider_list = []

        newznabProviderDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))

        finishedNames = []

        # add all the newznab info we got into our list
        if newznab_string:
            for curNewznabProviderStr in newznab_string.split('!!!'):

                if not curNewznabProviderStr:
                    continue

                cur_name, cur_url, cur_key = curNewznabProviderStr.split('|')
                cur_url = config.clean_url(cur_url)

                newProvider = newznab.NewznabProvider(cur_name, cur_url, key=cur_key)

                cur_id = newProvider.getID()

                # if it already exists then update it
                if cur_id in newznabProviderDict:
                    newznabProviderDict[cur_id].name = cur_name
                    newznabProviderDict[cur_id].url = cur_url
                    newznabProviderDict[cur_id].key = cur_key
                    # a 0 in the key spot indicates that no key is needed
                    if cur_key == '0':
                        newznabProviderDict[cur_id].needs_auth = False
                    else:
                        newznabProviderDict[cur_id].needs_auth = True
                else:
                    sickbeard.newznabProviderList.append(newProvider)

                finishedNames.append(cur_id)

        # delete anything that is missing
        for curProvider in sickbeard.newznabProviderList:
            if curProvider.getID() not in finishedNames:
                sickbeard.newznabProviderList.remove(curProvider)

        # do the enable/disable
        for curProviderStr in provider_str_list:
            curProvider, curEnabled = curProviderStr.split(':')
            curEnabled = config.to_int(curEnabled)

            provider_list.append(curProvider)

            if curProvider == 'womble_s_index':
                sickbeard.WOMBLE = curEnabled
            elif curProvider == 'omgwtfnzbs':
                sickbeard.OMGWTFNZBS = curEnabled
            elif curProvider == 'ezrss':
                sickbeard.EZRSS = curEnabled
            elif curProvider == 'hdbits':
                sickbeard.HDBITS = curEnabled
            elif curProvider == 'tvtorrents':
                sickbeard.TVTORRENTS = curEnabled
            elif curProvider == 'torrentleech':
                sickbeard.TORRENTLEECH = curEnabled
            elif curProvider == 'btn':
                sickbeard.BTN = curEnabled
            elif curProvider in newznabProviderDict:
                newznabProviderDict[curProvider].enabled = bool(curEnabled)
            else:
                logger.log(u"don't know what " + curProvider + " is, skipping")

        sickbeard.HDBITS_USERNAME = hdbits_username.strip()
        sickbeard.HDBITS_PASSKEY = hdbits_passkey.strip()

        sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
        sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()

        sickbeard.TORRENTLEECH_KEY = torrentleech_key.strip()

        sickbeard.BTN_API_KEY = btn_api_key.strip()

        sickbeard.OMGWTFNZBS_USERNAME = omgwtfnzbs_username.strip()
        sickbeard.OMGWTFNZBS_APIKEY = omgwtfnzbs_apikey.strip()

        sickbeard.NEWZNAB_DATA = '!!!'.join([x.configStr() for x in sickbeard.newznabProviderList])
        sickbeard.PROVIDER_ORDER = provider_list

        sickbeard.save_config()

        if len(results) > 0:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results))
        else:
            ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))

        redirect("/config/providers/")

class ConfigNotifications:
    """Web handler for the Notifications configuration page."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="config_notifications.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    @cherrypy.expose
    def saveNotifications(self, use_xbmc=None, xbmc_always_on=None, xbmc_notify_onsnatch=None,
                          xbmc_notify_ondownload=None, xbmc_update_onlyfirst=None, xbmc_update_library=None,
                          xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
                          use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None,
                          plex_update_library=None, plex_server_host=None, plex_host=None, plex_username=None,
                          plex_password=None,
                          use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None,
                          growl_host=None, growl_password=None,
                          use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_api=None, 
prowl_priority=None,
                          use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None,
                          use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None,
                          boxcar2_access_token=None, boxcar2_sound=None,
                          use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None,
                          pushover_userkey=None, pushover_priority=None, pushover_device=None, pushover_sound=None,
                          pushover_device_list=None,
                          use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None,
                          use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None,
                          use_synoindex=None, synoindex_notify_onsnatch=None, synoindex_notify_ondownload=None,
                          synoindex_update_library=None,
                          use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
                          use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None,
                          use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None,
                          pytivo_update_library=None, pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
                          use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_api=None,
                          nma_priority=None,
                          use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None,
                          pushalot_authorizationtoken=None, pushalot_silent=None, pushalot_important=None,
                          use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None,
                          pushbullet_access_token=None, pushbullet_device_iden=None, pushbullet_device_list=None,
                          use_slack=None, slack_notify_onsnatch=None, slack_notify_ondownload=None,
                          slack_access_token=None, slack_channel=None, slack_bot_name=None, slack_icon_url=None):
        """Persist every notifier's settings from the Notifications config form."""
        results = []

        # Home Theater / NAS
        sickbeard.USE_XBMC = config.checkbox_to_value(use_xbmc)
        sickbeard.XBMC_ALWAYS_ON = config.checkbox_to_value(xbmc_always_on)
        sickbeard.XBMC_NOTIFY_ONSNATCH = config.checkbox_to_value(xbmc_notify_onsnatch)
        sickbeard.XBMC_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(xbmc_notify_ondownload)
        sickbeard.XBMC_UPDATE_LIBRARY = config.checkbox_to_value(xbmc_update_library)
        sickbeard.XBMC_UPDATE_FULL = config.checkbox_to_value(xbmc_update_full)
        sickbeard.XBMC_UPDATE_ONLYFIRST = config.checkbox_to_value(xbmc_update_onlyfirst)
        sickbeard.XBMC_HOST = config.clean_hosts(xbmc_host)
        sickbeard.XBMC_USERNAME = xbmc_username
        sickbeard.XBMC_PASSWORD = xbmc_password

        sickbeard.USE_PLEX = config.checkbox_to_value(use_plex)
        sickbeard.PLEX_NOTIFY_ONSNATCH = config.checkbox_to_value(plex_notify_onsnatch)
        sickbeard.PLEX_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(plex_notify_ondownload)
        sickbeard.PLEX_UPDATE_LIBRARY = config.checkbox_to_value(plex_update_library)
        sickbeard.PLEX_SERVER_HOST = config.clean_host(plex_server_host)
        sickbeard.PLEX_HOST = config.clean_hosts(plex_host)
        sickbeard.PLEX_USERNAME = plex_username
        sickbeard.PLEX_PASSWORD = plex_password

        sickbeard.USE_NMJ = config.checkbox_to_value(use_nmj)
        sickbeard.NMJ_HOST = config.clean_host(nmj_host)
        sickbeard.NMJ_DATABASE = nmj_database
        sickbeard.NMJ_MOUNT = nmj_mount

        sickbeard.USE_NMJv2 = config.checkbox_to_value(use_nmjv2)
        sickbeard.NMJv2_HOST = config.clean_host(nmjv2_host)
        sickbeard.NMJv2_DATABASE = nmjv2_database
        sickbeard.NMJv2_DBLOC = nmjv2_dbloc

        sickbeard.USE_SYNOINDEX = config.checkbox_to_value(use_synoindex)
        sickbeard.SYNOINDEX_NOTIFY_ONSNATCH = config.checkbox_to_value(synoindex_notify_onsnatch)
        sickbeard.SYNOINDEX_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(synoindex_notify_ondownload)
        sickbeard.SYNOINDEX_UPDATE_LIBRARY = config.checkbox_to_value(synoindex_update_library)

        sickbeard.USE_PYTIVO = config.checkbox_to_value(use_pytivo)
        # sickbeard.PYTIVO_NOTIFY_ONSNATCH = config.checkbox_to_value(pytivo_notify_onsnatch)
        # sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pytivo_notify_ondownload)
        # sickbeard.PYTIVO_UPDATE_LIBRARY = config.checkbox_to_value(pytivo_update_library)
        sickbeard.PYTIVO_HOST = config.clean_host(pytivo_host)
        sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
        sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name

        # Devices
        sickbeard.USE_GROWL = config.checkbox_to_value(use_growl)
        sickbeard.GROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(growl_notify_onsnatch)
        sickbeard.GROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(growl_notify_ondownload)
        sickbeard.GROWL_HOST = config.clean_host(growl_host, default_port=23053)
        sickbeard.GROWL_PASSWORD = growl_password

        sickbeard.USE_PROWL = config.checkbox_to_value(use_prowl)
        sickbeard.PROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(prowl_notify_onsnatch)
        sickbeard.PROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(prowl_notify_ondownload)
        sickbeard.PROWL_API = prowl_api
        sickbeard.PROWL_PRIORITY = config.to_int(prowl_priority)

        sickbeard.USE_LIBNOTIFY = config.checkbox_to_value(use_libnotify)
        sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = config.checkbox_to_value(libnotify_notify_onsnatch)
        sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(libnotify_notify_ondownload)

        sickbeard.USE_PUSHOVER = config.checkbox_to_value(use_pushover)
        sickbeard.PUSHOVER_NOTIFY_ONSNATCH = config.checkbox_to_value(pushover_notify_onsnatch)
        sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushover_notify_ondownload)
        sickbeard.PUSHOVER_USERKEY = pushover_userkey
        sickbeard.PUSHOVER_PRIORITY = config.to_int(pushover_priority)
        sickbeard.PUSHOVER_DEVICE = pushover_device
        sickbeard.PUSHOVER_SOUND = pushover_sound

        sickbeard.USE_BOXCAR2 = config.checkbox_to_value(use_boxcar2)
        sickbeard.BOXCAR2_NOTIFY_ONSNATCH = config.checkbox_to_value(boxcar2_notify_onsnatch)
        sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(boxcar2_notify_ondownload)
        sickbeard.BOXCAR2_ACCESS_TOKEN = boxcar2_access_token
        sickbeard.BOXCAR2_SOUND = boxcar2_sound

        sickbeard.USE_NMA = config.checkbox_to_value(use_nma)
        sickbeard.NMA_NOTIFY_ONSNATCH = config.checkbox_to_value(nma_notify_onsnatch)
        sickbeard.NMA_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(nma_notify_ondownload)
        sickbeard.NMA_API = nma_api
        sickbeard.NMA_PRIORITY = config.to_int(nma_priority)

        sickbeard.USE_PUSHALOT = config.checkbox_to_value(use_pushalot)
        sickbeard.PUSHALOT_NOTIFY_ONSNATCH = config.checkbox_to_value(pushalot_notify_onsnatch)
        sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushalot_notify_ondownload)
        sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
        sickbeard.PUSHALOT_SILENT = config.checkbox_to_value(pushalot_silent)
        sickbeard.PUSHALOT_IMPORTANT = config.checkbox_to_value(pushalot_important)

        sickbeard.USE_PUSHBULLET = config.checkbox_to_value(use_pushbullet)
        sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = config.checkbox_to_value(pushbullet_notify_onsnatch)
        sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushbullet_notify_ondownload)
        sickbeard.PUSHBULLET_ACCESS_TOKEN = pushbullet_access_token
        sickbeard.PUSHBULLET_DEVICE_IDEN = pushbullet_device_iden

        # Online
        sickbeard.USE_TWITTER = config.checkbox_to_value(use_twitter)
        sickbeard.TWITTER_NOTIFY_ONSNATCH = config.checkbox_to_value(twitter_notify_onsnatch)
        sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(twitter_notify_ondownload)

        sickbeard.USE_TRAKT = config.checkbox_to_value(use_trakt)
        sickbeard.TRAKT_USERNAME = trakt_username
        sickbeard.TRAKT_PASSWORD = trakt_password
        sickbeard.TRAKT_API = trakt_api

        sickbeard.USE_SLACK = config.checkbox_to_value(use_slack)
        sickbeard.SLACK_NOTIFY_ONSNATCH = config.checkbox_to_value(slack_notify_onsnatch)
        sickbeard.SLACK_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(slack_notify_ondownload)
        sickbeard.SLACK_ACCESS_TOKEN = slack_access_token
        sickbeard.SLACK_CHANNEL = slack_channel
        sickbeard.SLACK_BOT_NAME = slack_bot_name
        sickbeard.SLACK_ICON_URL = slack_icon_url

        sickbeard.save_config()

        if len(results) > 0:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results))
        else:
            ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))

        redirect("/config/notifications/")

class ConfigHidden:
    """Web handler for hidden/advanced configuration settings."""

    @cherrypy.expose
    def 
index(self):
        t = PageTemplate(file="config_hidden.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    @cherrypy.expose
    def saveHidden(self, anon_redirect=None, display_all_seasons=None, git_path=None, extra_scripts=None,
                   create_missing_show_dirs=None, add_shows_wo_dir=None):
        """Persist the hidden/advanced settings form."""
        results = []

        sickbeard.ANON_REDIRECT = anon_redirect
        sickbeard.DISPLAY_ALL_SEASONS = config.checkbox_to_value(display_all_seasons)
        sickbeard.GIT_PATH = git_path
        sickbeard.EXTRA_SCRIPTS = [x.strip() for x in extra_scripts.split('|') if x.strip()]
        sickbeard.CREATE_MISSING_SHOW_DIRS = config.checkbox_to_value(create_missing_show_dirs)
        sickbeard.ADD_SHOWS_WO_DIR = config.checkbox_to_value(add_shows_wo_dir)

        sickbeard.save_config()

        # results is never populated above, so this branch currently always
        # falls through to the success message
        if len(results) > 0:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results))
        else:
            ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))

        redirect("/config/hidden/")

    @cherrypy.expose
    def sbEnded(self, username=None):
        """
        Compare local show statuses against thetvdb (up to 400 non-Continuing
        shows) and return an HTML report of id mismatches and status changes.
        """
        cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"

        ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
        t = tvdb_api.Tvdb(**ltvdb_api_parms)

        results = []
        errMatch = []
        changeState = []

        myDB = db.DBConnection()
        sql_result = myDB.select("SELECT tvdb_id,show_name,status FROM tv_shows WHERE status != 'Continuing' ORDER BY show_id DESC LIMIT 400")
        myDB.connection.close()

        if (len(sql_result)) > 1:
            logger.log(u"There were " + str(len(sql_result)) + " shows in your database that need checking (limited to 400).", logger.MESSAGE)
            results.append("There were <b>" + str(len(sql_result)) + "</b> shows in your database that need checking (limited to 400).<br>")
        else:
            logger.log(u"There were no shows that needed to be checked at this time.", logger.MESSAGE)
            results.append("There were no shows that needed to be checked at this time.<br>")

        for ended_show in sql_result:
            tvdb_id = ended_show['tvdb_id']
            show_name = ended_show['show_name']
            status = ended_show['status']

            # NOTE(review): bare except silently skips any lookup failure
            # (network errors, tvdb errors, bad names) — consider narrowing
            try:
                show = t[show_name]
            except:
                logger.log(u"Issue found when looking up \"%s\"" % (show_name), logger.ERROR)
                continue

            logger.log(u"Checking \"%s\" with local status \"%s\" against thetvdb" % (show_name, status), logger.MESSAGE)

            show_id = show['id']
            if int(tvdb_id) != int(show_id):
                logger.log(u"Warning: Issue matching \"%s\" on tvdb. Got \"%s\" and \"%s\"" % (show_name, tvdb_id, show_id), logger.ERROR)
                errMatch.append("<tr><td class='tvShow'><a target='_blank' href='%s/home/displayShow?show=%s'>%s</a></td><td>%s</td><td>%s</td>" % (sickbeard.WEB_ROOT, tvdb_id, show_name, tvdb_id, show_id))
            else:
                show_status = show['status']
                if not show_status:
                    show_status = ""
                if show_status != status:
                    changeState.append("<tr><td class='tvShow'><a target='_blank' href='%s/home/displayShow?show=%s'>%s</a></td><td>%s</td><td>%s</td>" % (sickbeard.WEB_ROOT, tvdb_id, show_name, status, show_status))

            show.clear()  # needed to free up memory since python's garbage collection would keep this around

        if len(errMatch):
            errMatch.insert(0, "<br>These shows need to be removed then added back to Sick Beard to correct their TVDBID.<br><table class='tablesorter'><thead><tr><th>show name</th><th>local tvdbid</th><th>remote tvdbid</th></tr></thead>")
            errMatch.append("</table>")
            results += errMatch

        if len(changeState):
            changeState.insert(0, "<br>These shows need to have 'force full update' ran on them to correct their status.<br><table class='tablesorter'><thead><tr><th>show name</th><th>local status</th><th>remote status</th></tr></thead>")
            changeState.append("</table>")
            results += changeState

        return results

class Config:
    """Root /config handler; sub-pages are mounted as class attributes."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="config.tmpl")
        t.submenu = ConfigMenu
        return _munge(t)

    general = ConfigGeneral()
    search = ConfigSearch()
    postProcessing = ConfigPostProcessing()
    providers = ConfigProviders()
    notifications = ConfigNotifications()
    hidden = ConfigHidden()

def haveXBMC():
    return sickbeard.USE_XBMC and 
sickbeard.XBMC_UPDATE_LIBRARY

def havePLEX():
    # menu predicate: show "Update Plex" only when Plex library updates are enabled
    return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY

def HomeMenu():
    """Build the Home section submenu (some entries are gated by predicates)."""
    return [
        { 'title': 'Add Shows', 'path': 'home/addShows/', },
        { 'title': 'Manual Post-Processing', 'path': 'home/postprocess/' },
        { 'title': 'Update XBMC', 'path': 'home/updateXBMC/', 'requires': haveXBMC },
        { 'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': havePLEX },
        { 'title': 'Restart', 'path': 'home/restart/?pid=' + str(sickbeard.PID), 'confirm': True },
        { 'title': 'Shutdown', 'path': 'home/shutdown/?pid=' + str(sickbeard.PID), 'confirm': True },
    ]

class HomePostProcess:
    """Web handler for manual post-processing."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="home_postprocess.tmpl")
        t.submenu = HomeMenu()
        return _munge(t)

    @cherrypy.expose
    def processEpisode(self, dir=None, nzbName=None, method=None, jobName=None, quiet=None, *args, **kwargs):
        """Run the post-processor on dir; 'on' checkbox kwargs become True options."""
        if not dir:
            redirect("/home/postprocess/")
        else:
            pp_options = {}
            for key, value in kwargs.iteritems():
                if value == 'on':
                    value = True
                pp_options[key] = value

            result = processTV.processDir(dir, nzbName, method=method, pp_options=pp_options)
            if quiet is not None and int(quiet) == 1:
                return result

            result = result.replace("\n", "<br />\n")
            return _genericMessage("Postprocessing results", result)

class NewHomeAddShows:
    """Web handler for the add-shows workflow."""

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="home_addShows.tmpl")
        t.submenu = HomeMenu()
        return _munge(t)

    @cherrypy.expose
    def getTVDBLanguages(self):
        """JSON list of valid TVDB languages, sorted with 'en' first."""
        result = tvdb_api.Tvdb().config['valid_languages']

        # Make sure list is sorted alphabetically but 'en' is in front
        if 'en' in result:
            del result[result.index('en')]
        result.sort()
        result.insert(0, 'en')

        return json.dumps({'results': result})

    @cherrypy.expose
    def sanitizeFileName(self, name):
        return helpers.sanitizeFileName(name)

    @cherrypy.expose
    def searchTVDBForShowName(self, name, lang="en"):
        """
        Search thetvdb's GetSeries API for the given show name and return a
        JSON payload of (tvdb_id, series name, first aired) tuples.
        """
        if not lang or lang == 'null':
            lang = "en"

        baseURL = "http://thetvdb.com/api/GetSeries.php?"

        nameUTF8 = name.encode('utf-8')

        logger.log(u"Trying to find Show on thetvdb.com with: " + nameUTF8.decode('utf-8'), logger.DEBUG)

        # Use each word in the show's name as a possible search term
        keywords = nameUTF8.split(' ')

        # Insert the whole show's name as the first search term so best results are first
        # ex: keywords = ['Some Show Name', 'Some', 'Show', 'Name']
        if len(keywords) > 1:
            keywords.insert(0, nameUTF8)

        # Query the TVDB for each search term and build the list of results
        results = []

        for searchTerm in keywords:
            params = {'seriesname': searchTerm,
                      'language': lang}

            finalURL = baseURL + urllib.urlencode(params)

            logger.log(u"Searching for Show with searchterm: \'" + searchTerm.decode('utf-8') + u"\' on URL " + finalURL, logger.DEBUG)
            urlData = helpers.getURL(finalURL)

            if urlData is None:
                # When urlData is None, trouble connecting to TVDB, don't try the rest of the keywords
                logger.log(u"Unable to get URL: " + finalURL, logger.ERROR)
                break
            else:
                try:
                    # NOTE(review): getiterator() is deprecated in newer ElementTree
                    # releases in favor of iter() — fine on the Python 2 stdlib
                    seriesXML = etree.ElementTree(etree.XML(urlData))
                    series = seriesXML.getiterator('Series')
                except Exception, e:
                    # use finalURL in log, because urlData can be too much information
                    logger.log(u"Unable to parse XML for some reason: " + ex(e) + " from XML: " + finalURL, logger.ERROR)
                    series = ''

                # add each result to our list
                for curSeries in series:
                    tvdb_id = int(curSeries.findtext('seriesid'))

                    # don't add duplicates
                    if tvdb_id in [x[0] for x in results]:
                        continue

                    results.append((tvdb_id, curSeries.findtext('SeriesName'), curSeries.findtext('FirstAired')))

        lang_id = tvdb_api.Tvdb().config['langabbv_to_id'][lang]

        return json.dumps({'results': results, 'langid': lang_id})

    @cherrypy.expose
    def massAddTable(self, rootDir=None):
        """Render the mass-add table listing candidate show folders under the root dirs."""
        t = PageTemplate(file="home_massAddTable.tmpl")
        t.submenu = HomeMenu()

        myDB = db.DBConnection()

        if not rootDir:
            return "No folders selected."
        elif type(rootDir) != list:
            root_dirs = [rootDir]
        else:
            root_dirs = rootDir

        root_dirs = [urllib.unquote_plus(x) for x in root_dirs]

        # move the default root dir (index stored as the first | field) to the front
        if sickbeard.ROOT_DIRS:
            default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
        else:
            default_index = 0

        if len(root_dirs) > default_index:
            tmp = root_dirs[default_index]
            if tmp in root_dirs:
                root_dirs.remove(tmp)
                root_dirs = [tmp] + root_dirs

        dir_list = []

        for root_dir in root_dirs:
            # NOTE(review): bare except skips any unlistable root dir silently
            try:
                file_list = ek.ek(os.listdir, root_dir)
            except:
                continue

            for cur_file in file_list:

                cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
                if not ek.ek(os.path.isdir, cur_path):
                    continue

                cur_dir = {
                    'dir': cur_path,
                    'display_dir': '<b>' + ek.ek(os.path.dirname, cur_path) + os.sep + '</b>' + ek.ek(os.path.basename, cur_path),
                }

                # see if the folder is in XBMC already
                dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])

                if dirResults:
                    cur_dir['added_already'] = True
                else:
                    cur_dir['added_already'] = False

                dir_list.append(cur_dir)

                # try to recover an existing tvdb id / show name from on-disk metadata
                tvdb_id = ''
                show_name = ''
                for cur_provider in sickbeard.metadata_provider_dict.values():
                    (tvdb_id, show_name) = cur_provider.retrieveShowMetadata(cur_path)
                    if tvdb_id and show_name:
                        break

                cur_dir['existing_info'] = (tvdb_id, show_name)

                if tvdb_id and helpers.findCertainShow(sickbeard.showList, tvdb_id):
                    cur_dir['added_already'] = True

        t.dirList = dir_list

        return _munge(t)

    @cherrypy.expose
    def newShow(self, show_to_add=None, other_shows=None):
        """
        Display the new show page which collects a tvdb id, folder, and extra options and
        posts them to addNewShow
        """
        t = PageTemplate(file="home_newShow.tmpl")
        t.submenu = HomeMenu()

        show_dir, tvdb_id, show_name = self.split_extra_show(show_to_add)

        if tvdb_id and show_name:
            use_provided_info = True
        else:
            use_provided_info = False

        # tell the template whether we're giving it show name & TVDB ID
        t.use_provided_info = use_provided_info

        # use the given show_dir for the tvdb search if available
        if not show_dir:
            t.default_show_name = ''
        elif not 
show_name: t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.', ' ') else: t.default_show_name = show_name # carry a list of other dirs if given if not other_shows: other_shows = [] elif type(other_shows) != list: other_shows = [other_shows] if use_provided_info: t.provided_tvdb_id = tvdb_id t.provided_tvdb_name = show_name t.provided_show_dir = show_dir t.other_shows = other_shows return _munge(t) @cherrypy.expose def addNewShow(self, whichSeries=None, tvdbLang="en", rootDir=None, defaultStatus=None, anyQualities=None, bestQualities=None, flatten_folders=None, fullShowPath=None, other_shows=None, skipShow=None): """ Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are provided then it forwards back to newShow, if not it goes to /home. """ # grab our list of other dirs if given if not other_shows: other_shows = [] elif type(other_shows) != list: other_shows = [other_shows] def finishAddShow(): # if there are no extra shows then go home if not other_shows: redirect("/home/") # peel off the next one next_show_dir = other_shows[0] rest_of_show_dirs = other_shows[1:] # go to add the next show return self.newShow(next_show_dir, rest_of_show_dirs) # if we're skipping then behave accordingly if skipShow: return finishAddShow() # sanity check on our inputs if (not rootDir and not fullShowPath) or not whichSeries: return "Missing params, no tvdb id or folder:" + repr(whichSeries) + " and " + repr(rootDir) + "/" + repr(fullShowPath) # figure out what show we're adding and where series_pieces = whichSeries.partition('|') if len(series_pieces) < 3: return "Error with show selection." 
tvdb_id = int(series_pieces[0]) show_name = series_pieces[2] # use the whole path if it's given, or else append the show name to the root dir to get the full show path if fullShowPath: show_dir = ek.ek(os.path.normpath, fullShowPath) else: show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name)) # blanket policy - if the dir exists you should have used "add existing show" numbnuts if ek.ek(os.path.isdir, show_dir) and not fullShowPath: ui.notifications.error("Unable to add show", "Folder " + show_dir + " exists already") redirect("/home/addShows/existingShows/") # don't create show dir if config says not to if sickbeard.ADD_SHOWS_WO_DIR: logger.log(u"Skipping initial creation of " + show_dir + " due to config.ini setting") else: dir_exists = helpers.makeDir(show_dir) if not dir_exists: logger.log(u"Unable to create the folder " + show_dir + ", can't add the show", logger.ERROR) ui.notifications.error("Unable to add show", "Unable to create the folder " + show_dir + ", can't add the show") redirect("/home/") else: helpers.chmodAsParent(show_dir) # prepare the inputs for passing along flatten_folders = config.checkbox_to_value(flatten_folders) if not anyQualities: anyQualities = [] if not bestQualities: bestQualities = [] if type(anyQualities) != list: anyQualities = [anyQualities] if type(bestQualities) != list: bestQualities = [bestQualities] newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities)) # add the show sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(defaultStatus), newQuality, flatten_folders, tvdbLang) # @UndefinedVariable ui.notifications.message('Show added', 'Adding the specified show into ' + show_dir) return finishAddShow() @cherrypy.expose def existingShows(self): """ Prints out the page to add existing shows from a root dir """ t = PageTemplate(file="home_addExistingShow.tmpl") t.submenu = HomeMenu() return _munge(t) def split_extra_show(self, extra_show): if not extra_show: 
return (None, None, None) split_vals = extra_show.split('|') if len(split_vals) < 3: return (extra_show, None, None) show_dir = split_vals[0] tvdb_id = split_vals[1] show_name = '|'.join(split_vals[2:]) return (show_dir, tvdb_id, show_name) @cherrypy.expose def addExistingShows(self, shows_to_add=None, promptForSettings=None): """ Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards along to the newShow page. """ # grab a list of other shows to add, if provided if not shows_to_add: shows_to_add = [] elif type(shows_to_add) != list: shows_to_add = [shows_to_add] shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add] promptForSettings = config.checkbox_to_value(promptForSettings) tvdb_id_given = [] dirs_only = [] # separate all the ones with TVDB IDs for cur_dir in shows_to_add: if not '|' in cur_dir: dirs_only.append(cur_dir) else: show_dir, tvdb_id, show_name = self.split_extra_show(cur_dir) if not show_dir or not tvdb_id or not show_name: continue tvdb_id_given.append((show_dir, int(tvdb_id), show_name)) # if they want me to prompt for settings then I will just carry on to the newShow page if promptForSettings and shows_to_add: return self.newShow(shows_to_add[0], shows_to_add[1:]) # if they don't want me to prompt for settings then I can just add all the nfo shows now num_added = 0 for cur_show in tvdb_id_given: show_dir, tvdb_id, show_name = cur_show # add the show sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, SKIPPED, sickbeard.QUALITY_DEFAULT, sickbeard.FLATTEN_FOLDERS_DEFAULT) # @UndefinedVariable num_added += 1 if num_added: ui.notifications.message("Shows Added", "Automatically added " + str(num_added) + " from their existing metadata files") # if we're done then go home if not dirs_only: redirect("/home/") # for the remaining shows we need to prompt for each one, so forward this on to the newShow page return self.newShow(dirs_only[0], dirs_only[1:]) ErrorLogsMenu = [ { 'title': 'Clear Errors', 
class ErrorLogs:

    @cherrypy.expose
    def index(self):
        t = PageTemplate(file="errorlogs.tmpl")
        t.submenu = ErrorLogsMenu
        return _munge(t)

    @cherrypy.expose
    def clearerrors(self):
        classes.ErrorViewer.clear()
        redirect("/errorlogs/")

    @cherrypy.expose
    def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
        """Show the newest log lines at or above *minLevel*, capped at *maxLines*."""
        t = PageTemplate(file="viewlogs.tmpl")
        t.submenu = ErrorLogsMenu

        minLevel = int(minLevel)

        data = []
        if os.path.isfile(logger.sb_log_instance.log_file_path):
            with ek.ek(open, logger.sb_log_instance.log_file_path) as f:
                data = f.readlines()

        # "YYYY-MM-DD HH:MM:SS LEVEL  source :: message" (raw string; value unchanged)
        regex = r"^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$"

        finalData = []

        numLines = 0
        lastLine = False
        numToShow = min(maxLines, len(data))

        # walk the file backwards so we collect the newest entries first
        for x in reversed(data):

            x = x.decode('utf-8')
            match = re.match(regex, x)

            if match:
                level = match.group(7)
                if level not in logger.reverseNames:
                    lastLine = False
                    continue

                if logger.reverseNames[level] >= minLevel:
                    lastLine = True
                    finalData.append(x)
                else:
                    lastLine = False
                    continue

            elif lastLine:
                # continuation line (traceback etc.) of an entry we kept
                finalData.append("AA" + x)

            numLines += 1

            if numLines >= numToShow:
                break

        result = "".join(finalData)

        t.logLines = result
        t.minLevel = minLevel

        return _munge(t)


class Home:

    @cherrypy.expose
    def is_alive(self, *args, **kwargs):
        """JSONP liveness probe; answers with the PID once startup has finished."""
        if 'callback' in kwargs and '_' in kwargs:
            callback, _ = kwargs['callback'], kwargs['_']
        else:
            # BUGFIX: user-facing typo "query stiring" corrected
            return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query string."

        cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
        cherrypy.response.headers['Content-Type'] = 'text/javascript'
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
        cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'

        if sickbeard.started:
            return callback + '(' + json.dumps({"msg": str(sickbeard.PID)}) + ');'
        else:
            return callback + '(' + json.dumps({"msg": "nope"}) + ');'
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" cherrypy.response.headers['Content-Type'] = 'text/javascript' cherrypy.response.headers['Access-Control-Allow-Origin'] = '*' cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with' if sickbeard.started: return callback + '(' + json.dumps({"msg": str(sickbeard.PID)}) + ');' else: return callback + '(' + json.dumps({"msg": "nope"}) + ');' @cherrypy.expose def index(self): t = PageTemplate(file="home.tmpl") t.submenu = HomeMenu() return _munge(t) addShows = NewHomeAddShows() postprocess = HomePostProcess() @cherrypy.expose def testSABnzbd(self, host=None, username=None, password=None, apikey=None): host = config.clean_url(host) connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey) if connection: authed, authMsg = sab.testAuthentication(host, username, password, apikey) # @UnusedVariable if authed: return "Success. Connected and authenticated" else: return "Authentication failed. 
SABnzbd expects '" + accesMsg + "' as authentication method" else: return "Unable to connect to host" @cherrypy.expose def testGrowl(self, host=None, password=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_host(host, default_port=23053) result = notifiers.growl_notifier.test_notify(host, password) if password is None or password == '': pw_append = '' else: pw_append = " with password: " + password if result: return "Registered and Tested growl successfully " + urllib.unquote_plus(host) + pw_append else: return "Registration and Testing of growl failed " + urllib.unquote_plus(host) + pw_append @cherrypy.expose def testProwl(self, prowl_api=None, prowl_priority=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority) if result: return "Test prowl notice sent successfully" else: return "Test prowl notice failed" @cherrypy.expose def testBoxcar2(self, accessToken=None, sound=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.boxcar2_notifier.test_notify(accessToken, sound) if result: return "Boxcar2 notification succeeded. Check your Boxcar2 clients to make sure it worked" else: return "Error sending Boxcar2 notification" @cherrypy.expose def testPushover(self, userKey=None, priority=None, device=None, sound=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.pushover_notifier.test_notify(userKey, priority, device, sound) if result: return "Pushover notification succeeded. 
Check your Pushover clients to make sure it worked" else: return "Error sending Pushover notification" @cherrypy.expose def getPushoverDevices(self, userKey=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.pushover_notifier.get_devices(userKey) if result: return result else: return "{}" @cherrypy.expose def twitterStep1(self): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" return notifiers.twitter_notifier._get_authorization() @cherrypy.expose def twitterStep2(self, key): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.twitter_notifier._get_credentials(key) logger.log(u"result: " + str(result)) if result: return "Key verification successful" else: return "Unable to verify key" @cherrypy.expose def testTwitter(self): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.twitter_notifier.test_notify() if result: return "Tweet successful, check your twitter to make sure it worked" else: return "Error sending Tweet" @cherrypy.expose def testXBMC(self, host=None, username=None, password=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_hosts(host) finalResult = '' for curHost in [x.strip() for x in host.split(",")]: curResult = notifiers.xbmc_notifier.test_notify(urllib.unquote_plus(curHost), username, password) if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]: finalResult += "Test XBMC notice sent successfully to " + urllib.unquote_plus(curHost) else: finalResult += "Test XBMC notice failed to " + urllib.unquote_plus(curHost) finalResult += "<br />\n" return finalResult @cherrypy.expose def testPLEX(self, host=None, username=None, password=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_hosts(host) finalResult = '' for curHost in [x.strip() for x in host.split(",")]: curResult = 
notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password) if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]: finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost) else: finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost) finalResult += "<br />\n" return finalResult @cherrypy.expose def testLibnotify(self): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" if notifiers.libnotify_notifier.test_notify(): return "Tried sending desktop notification via libnotify" else: return notifiers.libnotify.diagnose() @cherrypy.expose def testNMJ(self, host=None, database=None, mount=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_host(host) result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount) if result: return "Successfully started the scan update for NMJ" else: return "Failed to start the scan update for NMJ" @cherrypy.expose def settingsNMJ(self, host=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_host(host) result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host)) if result: return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT} else: return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. 
(see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}' @cherrypy.expose def testNMJv2(self, host=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_host(host) result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host)) if result: return "Successfully started the scan update for NMJv2" else: return "Failed to start the scan update for NMJv2" @cherrypy.expose def settingsNMJv2(self, host=None, dbloc=None, instance=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" host = config.clean_host(host) result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance) if result: return '{"message": "NMJv2 Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE} else: return '{"message": "Unable to find NMJv2 Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {"dbloc": dbloc} @cherrypy.expose def testTrakt(self, api=None, username=None, password=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.trakt_notifier.test_notify(api, username, password) if result: return "Test notice sent successfully to Trakt" else: return "Test notice failed to Trakt" @cherrypy.expose def testNMA(self, nma_api=None, nma_priority=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.nma_notifier.test_notify(nma_api, nma_priority) if result: return "Test NMA notice sent successfully" else: return "Test NMA notice failed" @cherrypy.expose def testPushalot(self, authtoken=None, silent=None, important=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.pushalot_notifier.test_notify(authtoken, silent, important) if result: return "Pushalot notification succeeded. 
Check your Pushalot clients to make sure it worked" else: return "Error sending Pushalot notification" @cherrypy.expose def testSynoNotify(self): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.synoindex_notifier.test_notify() if result: return "Test Synology notice sent successfully" else: return "Test Synology notice failed" @cherrypy.expose def testPushbullet(self, accessToken=None, device_iden=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.pushbullet_notifier.test_notify(accessToken, device_iden) if result: return "Pushbullet notification succeeded. Check your Pushbullet clients to make sure it worked" else: return "Error sending Pushbullet notification" @cherrypy.expose def getPushbulletDevices(self, accessToken=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.pushbullet_notifier.get_devices(accessToken) if result: return result else: return "{}" @cherrypy.expose def testSlack(self, accessToken=None, channel=None, bot_name=None, icon_url=None): cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" result = notifiers.slack_notifier.test_notify(accessToken, channel, bot_name, icon_url) if result: return "Slack notification succeeded. Check your Slack clients to make sure it worked" else: return "Error sending Slack notification" @cherrypy.expose def shutdown(self, pid=None): if str(pid) != str(sickbeard.PID): redirect("/home/") threading.Timer(2, sickbeard.invoke_shutdown).start() title = "Shutting down" message = "Sick Beard is shutting down..." 
return _genericMessage(title, message) @cherrypy.expose def restart(self, pid=None): if str(pid) != str(sickbeard.PID): redirect("/home/") t = PageTemplate(file="restart.tmpl") t.submenu = HomeMenu() # do a soft restart threading.Timer(2, sickbeard.invoke_restart, [False]).start() return _munge(t) @cherrypy.expose def update(self, pid=None): if str(pid) != str(sickbeard.PID): redirect("/home/") updated = sickbeard.versionCheckScheduler.action.update() # @UndefinedVariable if updated: # do a hard restart threading.Timer(2, sickbeard.invoke_restart, [False]).start() t = PageTemplate(file="restart_bare.tmpl") return _munge(t) else: return _genericMessage("Update Failed", "Update wasn't successful, not restarting. Check your log for more information.") @cherrypy.expose def displayShow(self, show=None): if show is None: return _genericMessage("Error", "Invalid show ID") else: showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return _genericMessage("Error", "Show not in show list") myDB = db.DBConnection() seasonResults = myDB.select( "SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season desc", [showObj.tvdbid] ) sqlResults = myDB.select( "SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [showObj.tvdbid] ) t = PageTemplate(file="displayShow.tmpl") t.submenu = [ { 'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid } ] try: t.showLoc = (showObj.location, True) except sickbeard.exceptions.ShowDirNotFoundException: t.showLoc = (showObj._location, False) show_message = '' if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): # @UndefinedVariable show_message = 'This show is in the process of being downloaded from theTVDB.com - the info below is incomplete.' elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): # @UndefinedVariable show_message = 'The information below is in the process of being updated.' 
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): # @UndefinedVariable show_message = 'The episodes below are currently being refreshed from disk' elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): # @UndefinedVariable show_message = 'This show is queued to be refreshed.' elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): # @UndefinedVariable show_message = 'This show is queued and awaiting an update.' if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): # @UndefinedVariable if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): # @UndefinedVariable t.submenu.append({ 'title': 'Delete', 'path': 'home/deleteShow?show=%d' % showObj.tvdbid, 'confirm': True }) t.submenu.append({ 'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d' % showObj.tvdbid }) t.submenu.append({ 'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&amp;force=1' % showObj.tvdbid }) t.submenu.append({ 'title': 'Update show in XBMC', 'path': 'home/updateXBMC?show=%d' % showObj.tvdbid, 'requires': haveXBMC }) t.submenu.append({ 'title': 'Preview Rename', 'path': 'home/testRename?show=%d' % showObj.tvdbid }) t.show = showObj t.sqlResults = sqlResults t.seasonResults = seasonResults t.show_message = show_message epCounts = {} epCats = {} epCounts[Overview.SKIPPED] = 0 epCounts[Overview.WANTED] = 0 epCounts[Overview.QUAL] = 0 epCounts[Overview.GOOD] = 0 epCounts[Overview.UNAIRED] = 0 epCounts[Overview.SNATCHED] = 0 for curResult in sqlResults: curEpCat = showObj.getOverview(int(curResult["status"])) epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat epCounts[curEpCat] += 1 def titler(x): if not x: return x if not x.lower().startswith('a to ') and x.lower().startswith('a '): x = x[2:] elif x.lower().startswith('an '): x = x[3:] elif x.lower().startswith('the '): x = x[4:] return x t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name))) 
t.epCounts = epCounts t.epCats = epCats return _munge(t) @cherrypy.expose def plotDetails(self, show, season, episode): result = db.DBConnection().action("SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (show, season, episode)).fetchone() return result['description'] if result else 'Episode not found.' @cherrypy.expose def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], flatten_folders=None, paused=None, skip_notices=None, directCall=False, air_by_date=None, tvdbLang=None, rls_ignore_words=None, rls_require_words=None): if show is None: errString = "Invalid show ID: " + str(show) if directCall: return [errString] else: return _genericMessage("Error", errString) showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: errString = "Unable to find the specified show: " + str(show) if directCall: return [errString] else: return _genericMessage("Error", errString) if not location and not anyQualities and not bestQualities and not flatten_folders: t = PageTemplate(file="editShow.tmpl") t.submenu = HomeMenu() with showObj.lock: t.show = showObj return _munge(t) flatten_folders = config.checkbox_to_value(flatten_folders) paused = config.checkbox_to_value(paused) skip_notices = config.checkbox_to_value(skip_notices) air_by_date = config.checkbox_to_value(air_by_date) if tvdbLang and tvdbLang in tvdb_api.Tvdb().config['valid_languages']: tvdb_lang = tvdbLang else: tvdb_lang = showObj.lang # if we changed the language then kick off an update if tvdb_lang == showObj.lang: do_update = False else: do_update = True if type(anyQualities) != list: anyQualities = [anyQualities] if type(bestQualities) != list: bestQualities = [bestQualities] errors = [] with showObj.lock: newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities)) showObj.quality = newQuality # reversed for now if bool(showObj.flatten_folders) != bool(flatten_folders): 
showObj.flatten_folders = flatten_folders try: sickbeard.showQueueScheduler.action.refreshShow(showObj) # @UndefinedVariable except exceptions.CantRefreshException, e: errors.append("Unable to refresh this show: " + ex(e)) showObj.paused = paused showObj.skip_notices = skip_notices # if this routine was called via the mass edit, do not change the options that are not passed if not directCall: showObj.air_by_date = air_by_date showObj.lang = tvdb_lang showObj.rls_ignore_words = rls_ignore_words.strip() showObj.rls_require_words = rls_require_words.strip() # if we change location clear the db of episodes, change it, write to db, and rescan if os.path.normpath(showObj._location) != os.path.normpath(location): logger.log(os.path.normpath(showObj._location) + " != " + os.path.normpath(location), logger.DEBUG) if not ek.ek(os.path.isdir, location): errors.append("New location <tt>%s</tt> does not exist" % location) # don't bother if we're going to update anyway elif not do_update: # change it try: showObj.location = location try: sickbeard.showQueueScheduler.action.refreshShow(showObj) # @UndefinedVariable except exceptions.CantRefreshException, e: errors.append("Unable to refresh this show:" + ex(e)) # grab updated info from TVDB #showObj.loadEpisodesFromTVDB() # rescan the episodes in the new folder except exceptions.NoNFOException: errors.append("The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in Sick Beard." 
% location) # save it to the DB showObj.saveToDB() # force the update if do_update: try: sickbeard.showQueueScheduler.action.updateShow(showObj, True) # @UndefinedVariable time.sleep(1) except exceptions.CantUpdateException, e: errors.append("Unable to force an update on the show.") if directCall: return errors if len(errors) > 0: ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"), '<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>") redirect("/home/displayShow?show=" + show) @cherrypy.expose def deleteShow(self, show=None): if show is None: return _genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return _genericMessage("Error", "Unable to find the specified show") if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): # @UndefinedVariable return _genericMessage("Error", "Shows can't be deleted while they're being added or updated.") showObj.deleteShow() ui.notifications.message('<b>%s</b> has been deleted' % showObj.name) redirect("/home/") @cherrypy.expose def refreshShow(self, show=None): if show is None: return _genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return _genericMessage("Error", "Unable to find the specified show") # force the update from the DB try: sickbeard.showQueueScheduler.action.refreshShow(showObj) # @UndefinedVariable except exceptions.CantRefreshException, e: ui.notifications.error("Unable to refresh this show.", ex(e)) time.sleep(3) redirect("/home/displayShow?show=" + str(showObj.tvdbid)) @cherrypy.expose def updateShow(self, show=None, force=0): if show is None: return _genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return 
_genericMessage("Error", "Unable to find the specified show") # force the update try: sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) # @UndefinedVariable except exceptions.CantUpdateException, e: ui.notifications.error("Unable to update this show.", ex(e)) # just give it some time time.sleep(3) redirect("/home/displayShow?show=" + str(showObj.tvdbid)) @cherrypy.expose def updateXBMC(self, show=None): if sickbeard.XBMC_UPDATE_ONLYFIRST: # only send update to first host in the list -- workaround for xbmc sql backend users host = sickbeard.XBMC_HOST.split(",")[0].strip() else: host = sickbeard.XBMC_HOST if show: show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) else: show_obj = None if notifiers.xbmc_notifier.update_library(show_obj=show_obj): ui.notifications.message("Library update command sent to XBMC host(s): " + host) else: ui.notifications.error("Unable to contact one or more XBMC host(s): " + host) redirect("/home/") @cherrypy.expose def updatePLEX(self): if notifiers.plex_notifier.update_library(): ui.notifications.message("Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST) else: ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST) redirect("/home/") @cherrypy.expose def setStatus(self, show=None, eps=None, status=None, direct=False): if show is None or eps is None or status is None: errMsg = "You must specify a show and at least one episode" if direct: ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return _genericMessage("Error", errMsg) if not statusStrings.has_key(int(status)): errMsg = "Invalid status" if direct: ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return _genericMessage("Error", errMsg) showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: errMsg = "Error", "Show not in show list" if direct: 
ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return _genericMessage("Error", errMsg) segment_list = [] if eps is not None: for curEp in eps.split('|'): logger.log(u"Attempting to set status on episode " + curEp + " to " + status, logger.DEBUG) epInfo = curEp.split('x') epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1])) if int(status) == WANTED: # figure out what segment the episode is in and remember it so we can backlog it if epObj.show.air_by_date: ep_segment = str(epObj.airdate)[:7] else: ep_segment = epObj.season if ep_segment not in segment_list: segment_list.append(ep_segment) if epObj is None: return _genericMessage("Error", "Episode couldn't be retrieved") with epObj.lock: # don't let them mess up UNAIRED episodes if epObj.status == UNAIRED: logger.log(u"Refusing to change status of " + curEp + " because it is UNAIRED", logger.ERROR) continue if int(status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.DOWNLOADED + [IGNORED] and not ek.ek(os.path.isfile, epObj.location): logger.log(u"Refusing to change status of " + curEp + " to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR) continue epObj.status = int(status) epObj.saveToDB() msg = "Backlog was automatically started for the following seasons of <b>" + showObj.name + "</b>:<br /><ul>" for cur_segment in segment_list: msg += "<li>Season " + str(cur_segment) + "</li>" logger.log(u"Sending backlog for " + showObj.name + " season " + str(cur_segment) + " because some eps were set to wanted") cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment) sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) # @UndefinedVariable msg += "</ul>" if segment_list: ui.notifications.message("Backlog started", msg) if direct: return json.dumps({'result': 'success'}) else: redirect("/home/displayShow?show=" + show) @cherrypy.expose def testRename(self, show=None): if show is 
None: return _genericMessage("Error", "You must specify a show") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return _genericMessage("Error", "Show not in show list") try: show_loc = showObj.location # @UnusedVariable except exceptions.ShowDirNotFoundException: return _genericMessage("Error", "Can't rename episodes when the show dir is missing.") ep_obj_rename_list = [] ep_obj_list = showObj.getAllEpisodes(has_location=True) for cur_ep_obj in ep_obj_list: # Only want to rename if we have a location if cur_ep_obj.location: if cur_ep_obj.relatedEps: # do we have one of multi-episodes in the rename list already have_already = False for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]: if cur_related_ep in ep_obj_rename_list: have_already = True break if not have_already: ep_obj_rename_list.append(cur_ep_obj) else: ep_obj_rename_list.append(cur_ep_obj) if ep_obj_rename_list: # present season DESC episode DESC on screen ep_obj_rename_list.reverse() t = PageTemplate(file="testRename.tmpl") t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid}] t.ep_obj_list = ep_obj_rename_list t.show = showObj return _munge(t) @cherrypy.expose def doRename(self, show=None, eps=None): if show is None or eps is None: errMsg = "You must specify a show and at least one episode" return _genericMessage("Error", errMsg) show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if show_obj is None: errMsg = "Error", "Show not in show list" return _genericMessage("Error", errMsg) try: show_loc = show_obj.location # @UnusedVariable except exceptions.ShowDirNotFoundException: return _genericMessage("Error", "Can't rename episodes when the show dir is missing.") myDB = db.DBConnection() if eps is None: redirect("/home/displayShow?show=" + show) for curEp in eps.split('|'): epInfo = curEp.split('x') # this is probably the worst possible way to deal with double eps but I've kinda painted myself into a 
corner here with this stupid database ep_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5", [show, epInfo[0], epInfo[1]]) if not ep_result: logger.log(u"Unable to find an episode for " + curEp + ", skipping", logger.WARNING) continue related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]]) root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1])) root_ep_obj.relatedEps = [] for cur_related_ep in related_eps_result: related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"])) if related_ep_obj not in root_ep_obj.relatedEps: root_ep_obj.relatedEps.append(related_ep_obj) root_ep_obj.rename() redirect("/home/displayShow?show=" + show) @cherrypy.expose def searchEpisode(self, show=None, season=None, episode=None): # retrieve the episode object and fail if we can't get one ep_obj = _getEpisode(show, season, episode) if isinstance(ep_obj, str): return json.dumps({'result': 'failure'}) # make a queue item for it and put it on the queue ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj) sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) # @UndefinedVariable # wait until the queue item tells us whether it worked or not while ep_queue_item.success is None: # @UndefinedVariable time.sleep(1) # return the correct json value if ep_queue_item.success: return json.dumps({'result': statusStrings[ep_obj.status]}) return json.dumps({'result': 'failure'}) class UI: @cherrypy.expose def add_message(self): ui.notifications.message('Test 1', 'This is test number 1') ui.notifications.error('Test 2', 'This is test number 2') return "ok" @cherrypy.expose def get_messages(self): messages = {} cur_notification_num = 1 for cur_notification in ui.notifications.get_notifications(): messages['notification-' + str(cur_notification_num)] = {'title': cur_notification.title, 'message': 
cur_notification.message, 'type': cur_notification.type} cur_notification_num += 1 return json.dumps(messages) class WebInterface: @cherrypy.expose def robots_txt(self): """ Keep web crawlers out """ cherrypy.response.headers['Content-Type'] = 'text/plain' return 'User-agent: *\nDisallow: /\n' @cherrypy.expose def index(self): redirect("/home/") @cherrypy.expose def showPoster(self, show=None, which=None): if which == 'poster': default_image_name = 'poster.png' else: default_image_name = 'banner.png' default_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', default_image_name) if show is None: return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png") else: showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png") cache_obj = image_cache.ImageCache() if which == 'poster': image_file_name = cache_obj.poster_path(showObj.tvdbid) # this is for 'banner' but also the default case else: image_file_name = cache_obj.banner_path(showObj.tvdbid) if ek.ek(os.path.isfile, image_file_name): # use startup argument to prevent using PIL even if installed if sickbeard.NO_RESIZE: return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg") try: from PIL import Image from cStringIO import StringIO except ImportError: # PIL isn't installed return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg") else: im = Image.open(image_file_name) if im.mode == 'P': # Convert GIFs to RGB im = im.convert('RGB') if which == 'banner': size = 606, 112 elif which == 'poster': size = 136, 200 else: return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg") im = im.resize(size, Image.ANTIALIAS) imgbuffer = StringIO() im.save(imgbuffer, 'JPEG', quality=85) cherrypy.response.headers['Content-Type'] = 'image/jpeg' return imgbuffer.getvalue() else: return 
cherrypy.lib.static.serve_file(default_image_path, content_type="image/png") @cherrypy.expose def setComingEpsLayout(self, layout): if layout not in ('poster', 'banner', 'list'): layout = 'banner' sickbeard.COMING_EPS_LAYOUT = layout redirect("/comingEpisodes/") @cherrypy.expose def toggleComingEpsDisplayPaused(self): sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED redirect("/comingEpisodes/") @cherrypy.expose def setComingEpsSort(self, sort): if sort not in ('date', 'network', 'show'): sort = 'date' sickbeard.COMING_EPS_SORT = sort redirect("/comingEpisodes/") @cherrypy.expose def comingEpisodes(self, layout="None"): myDB = db.DBConnection() today = datetime.date.today().toordinal() next_week = (datetime.date.today() + datetime.timedelta(days=7)).toordinal() recently = (datetime.date.today() - datetime.timedelta(days=3)).toordinal() done_show_list = [] qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED] sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season > 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, next_week] + qualList) for cur_result in sql_results: done_show_list.append(int(cur_result["showid"])) more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season > 0 AND showid NOT IN (" + ','.join(['?'] * len(done_show_list)) + ") AND tv_shows.tvdb_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season > 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? 
ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED) sql_results += more_sql_results more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season > 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList) sql_results += more_sql_results # sort by air date sorts = { 'date': (lambda x, y: cmp(int(x["airdate"]), int(y["airdate"]))), 'show': (lambda a, b: cmp(a["show_name"], b["show_name"])), 'network': (lambda a, b: cmp(a["network"], b["network"])), } sql_results.sort(sorts[sickbeard.COMING_EPS_SORT]) t = PageTemplate(file="comingEpisodes.tmpl") paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' } paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused' t.submenu = [ { 'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date', 'Show': 'setComingEpsSort/?sort=show', 'Network': 'setComingEpsSort/?sort=network', }}, { 'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner', 'Poster': 'setComingEpsLayout/?layout=poster', 'List': 'setComingEpsLayout/?layout=list', }}, paused_item, ] t.next_week = next_week t.today = today t.sql_results = sql_results # allow local overriding of layout parameter if layout and layout in ('poster', 'banner', 'list'): t.layout = layout else: t.layout = sickbeard.COMING_EPS_LAYOUT return _munge(t) manage = Manage() history = History() config = Config() home = Home() api = Api() browser = browser.WebFileBrowser() errorlogs = ErrorLogs() ui = UI()
117,191
Python
.py
2,176
41.111673
603
0.606327
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,328
showUpdater.py
midgetspy_Sick-Beard/sickbeard/showUpdater.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import os import sickbeard from sickbeard import logger from sickbeard import exceptions from sickbeard import ui from sickbeard.exceptions import ex from sickbeard import encodingKludge as ek from sickbeard import db class ShowUpdater(): def run(self, force=False): logger.log(u"Doing full update on all shows") update_datetime = datetime.datetime.today() update_date = update_datetime.date() # clean out cache directory, remove everything > 12 hours old if sickbeard.CACHE_DIR: cache_dir = sickbeard.TVDB_API_PARMS['cache'] logger.log(u"Trying to clean cache folder " + cache_dir) # Does our cache_dir exists if not ek.ek(os.path.isdir, cache_dir): logger.log(u"Can't clean " + cache_dir + " if it doesn't exist", logger.WARNING) else: max_age = datetime.timedelta(hours=12) # Get all our cache files cache_files = ek.ek(os.listdir, cache_dir) for cache_file in cache_files: cache_file_path = ek.ek(os.path.join, cache_dir, cache_file) if ek.ek(os.path.isfile, cache_file_path): cache_file_modified = datetime.datetime.fromtimestamp(ek.ek(os.path.getmtime, cache_file_path)) if update_datetime - cache_file_modified > max_age: try: ek.ek(os.remove, cache_file_path) except OSError, e: logger.log(u"Unable to clean " + cache_dir + ": 
" + repr(e) + " / " + str(e), logger.WARNING) break # select 10 'Ended' tv_shows updated more than 90 days ago to include in this update stale_should_update = [] stale_update_date = (update_date - datetime.timedelta(days=90)).toordinal() myDB = db.DBConnection() # last_update_date <= 90 days, sorted ASC because dates are ordinal sql_result = myDB.select("SELECT tvdb_id FROM tv_shows WHERE status = 'Ended' AND last_update_tvdb <= ? ORDER BY last_update_tvdb ASC LIMIT 10;", [stale_update_date]) for cur_result in sql_result: stale_should_update.append(cur_result['tvdb_id']) # start update process piList = [] for curShow in sickbeard.showList: try: # if should_update returns True (not 'Ended') or show is selected stale 'Ended' then update, otherwise just refresh if curShow.should_update(update_date=update_date) or curShow.tvdbid in stale_should_update: curQueueItem = sickbeard.showQueueScheduler.action.updateShow(curShow, True) # @UndefinedVariable else: logger.log(u"Not updating episodes for show " + curShow.name + " because it's marked as ended and last/next episode is not within the grace period.", logger.DEBUG) curQueueItem = sickbeard.showQueueScheduler.action.refreshShow(curShow, True) # @UndefinedVariable piList.append(curQueueItem) except (exceptions.CantUpdateException, exceptions.CantRefreshException), e: logger.log(u"Automatic update failed: " + ex(e), logger.ERROR) ui.ProgressIndicators.setIndicator('dailyUpdate', ui.QueueProgressIndicator("Daily Update", piList))
4,303
Python
.py
74
46.283784
184
0.636972
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,329
exceptions.py
midgetspy_Sick-Beard/sickbeard/exceptions.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from sickbeard.encodingKludge import fixStupidEncodings def ex(e): """ Returns a unicode string from the exception text if it exists. """ e_message = u"" if not e or not e.args: return e_message for arg in e.args: if arg is not None: if isinstance(arg, (str, unicode)): fixed_arg = fixStupidEncodings(arg, True) else: try: fixed_arg = u"error " + fixStupidEncodings(str(arg), True) except: fixed_arg = None if fixed_arg: if not e_message: e_message = fixed_arg else: e_message = e_message + " : " + fixed_arg return e_message class SickBeardException(Exception): "Generic SickBeard Exception - should never be thrown, only subclassed" class ConfigErrorException(SickBeardException): "Error in the config file" class LaterException(SickBeardException): "Something bad happened that I'll make a real exception for later" class NoNFOException(SickBeardException): "No NFO was found!" class NoShowDirException(SickBeardException): "Unable to find the show's directory" class FileNotFoundException(SickBeardException): "The specified file doesn't exist" class MultipleDBEpisodesException(SickBeardException): "Found multiple episodes in the DB! Must fix DB first" class MultipleDBShowsException(SickBeardException): "Found multiple shows in the DB! 
Must fix DB first" class MultipleShowObjectsException(SickBeardException): "Found multiple objects for the same show! Something is very wrong" class WrongShowException(SickBeardException): "The episode doesn't belong to the same show as its parent folder" class ShowNotFoundException(SickBeardException): "The show wasn't found on theTVDB" class EpisodeNotFoundException(SickBeardException): "The episode wasn't found on theTVDB" class NewzbinAPIThrottled(SickBeardException): "Newzbin has throttled us, deal with it" class TVRageException(SickBeardException): "TVRage API did something bad" class ShowDirNotFoundException(SickBeardException): "The show dir doesn't exist" class AuthException(SickBeardException): "Your authentication information is incorrect" class EpisodeDeletedException(SickBeardException): "This episode has been deleted" class CantRefreshException(SickBeardException): "The show can't be refreshed right now" class CantUpdateException(SickBeardException): "The show can't be updated right now" class PostProcessingFailed(SickBeardException): "Post-processing the episode failed"
3,598
Python
.py
80
37.675
79
0.721734
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,330
tvrage.py
midgetspy_Sick-Beard/sickbeard/tvrage.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import datetime import traceback import sickbeard from sickbeard import logger from sickbeard.common import UNAIRED from sickbeard import db from sickbeard import exceptions, helpers from sickbeard.exceptions import ex from lib.tvdb_api import tvdb_api, tvdb_exceptions class TVRage: def __init__(self, show): self.show = show self.lastEpInfo = None self.nextEpInfo = None self._tvrid = 0 self._tvrname = None if self.show.tvrid == 0: # if it's the right show then use the tvrage ID that the last lookup found (cached in self._trvid) show_is_right = self.confirmShow() or self.checkSync() if not show_is_right: raise exceptions.TVRageException("Shows aren't the same, aborting") if self._tvrid == 0 or self._tvrname == None: raise exceptions.TVRageException("We confirmed sync but got invalid data (no ID/name)") if show_is_right: logger.log(u"Setting TVRage ID for " + show.name + " to " + str(self._tvrid)) self.show.tvrid = self._tvrid self.show.saveToDB() if not self.show.tvrname: if self._tvrname == None: self._getTVRageInfo() logger.log(u"Setting TVRage Show Name for " + show.name + " to " + self._tvrname) self.show.tvrname = self._tvrname self.show.saveToDB() def confirmShow(self, force=False): if self.show.tvrid != 0 and 
not force: logger.log(u"We already have a TVRage ID, skipping confirmation", logger.DEBUG) return True logger.log(u"Checking the first episode of each season to see if the air dates match between TVDB and TVRage") tvdb_lang = self.show.lang try: try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(**ltvdb_api_parms) except tvdb_exceptions.tvdb_exception, e: logger.log(u"Currently this doesn't work with TVDB down but with some DB magic it can be added", logger.DEBUG) return None # check the first episode of every season for curSeason in t[self.show.tvdbid]: logger.log(u"Checking TVDB and TVRage sync for season " + str(curSeason), logger.DEBUG) airdate = None try: # don't do specials and don't do seasons with no episode 1 if curSeason == 0 or 1 not in t[self.show.tvdbid]: continue # get the episode info from TVDB ep = t[self.show.tvdbid][curSeason][1] # make sure we have a date to compare with if ep["firstaired"] == "" or ep["firstaired"] == None or ep["firstaired"] == "0000-00-00": continue # get a datetime object rawAirdate = [int(x) for x in ep["firstaired"].split("-")] airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2]) # get the episode info from TVRage info = self._getTVRageInfo(curSeason, 1) # make sure we have enough info if info == None or not info.has_key('Episode Info'): logger.log(u"TVRage doesn't have the episode info, skipping it", logger.DEBUG) continue # parse the episode info curEpInfo = self._getEpInfo(info['Episode Info']) # make sure we got some info back if curEpInfo == None: continue # if we couldn't compare with TVDB try comparing it with the local database except tvdb_exceptions.tvdb_exception, e: logger.log(u"Unable to check TVRage info against TVDB: " + ex(e)) logger.log(u"Trying against DB instead", logger.DEBUG) myDB = 
db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? and episode = ?", [self.show.tvdbid, self.lastEpInfo['season'], self.lastEpInfo['episode']]) if len(sqlResults) == 0: raise exceptions.EpisodeNotFoundException("Unable to find episode in DB") else: airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"])) # check if TVRage and TVDB have the same airdate for this episode if curEpInfo['airdate'] == airdate: logger.log(u"Successful match for TVRage and TVDB data for episode " + str(curSeason) + "x1)", logger.DEBUG) return True logger.log(u"Date from TVDB for episode " + str(curSeason) + "x1: " + str(airdate), logger.DEBUG) logger.log(u"Date from TVRage for episode " + str(curSeason) + "x1: " + str(curEpInfo['airdate']), logger.DEBUG) except Exception, e: logger.log(u"Error encountered while checking TVRage<->TVDB sync: " + ex(e), logger.WARNING) logger.log(traceback.format_exc(), logger.DEBUG) return False def checkSync(self, info=None): logger.log(u"Checking the last aired episode to see if the dates match between TVDB and TVRage") if self.lastEpInfo == None or self.nextEpInfo == None: self._saveLatestInfo(info) if self.nextEpInfo['season'] == 0 or self.nextEpInfo['episode'] == 0: return None try: airdate = None tvdb_lang = self.show.lang # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang # make sure the last TVDB episode matches our last episode try: t = tvdb_api.Tvdb(**ltvdb_api_parms) ep = t[self.show.tvdbid][self.lastEpInfo['season']][self.lastEpInfo['episode']] if ep["firstaired"] == "" or ep["firstaired"] == None: return None rawAirdate = [int(x) for x in ep["firstaired"].split("-")] airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2]) except tvdb_exceptions.tvdb_exception, e: logger.log(u"Unable to check 
TVRage info against TVDB: " + ex(e)) logger.log(u"Trying against DB instead", logger.DEBUG) myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? and episode = ?", [self.show.tvdbid, self.lastEpInfo['season'], self.lastEpInfo['episode']]) if len(sqlResults) == 0: raise exceptions.EpisodeNotFoundException("Unable to find episode in DB") else: airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"])) logger.log(u"Date from TVDB for episode " + str(self.lastEpInfo['season']) + "x" + str(self.lastEpInfo['episode']) + ": " + str(airdate), logger.DEBUG) logger.log(u"Date from TVRage for episode " + str(self.lastEpInfo['season']) + "x" + str(self.lastEpInfo['episode']) + ": " + str(self.lastEpInfo['airdate']), logger.DEBUG) if self.lastEpInfo['airdate'] == airdate: return True except Exception, e: logger.log(u"Error encountered while checking TVRage<->TVDB sync: " + ex(e), logger.WARNING) logger.log(traceback.format_exc(), logger.DEBUG) return False def _getTVRageInfo(self, season=None, episode=None, full=False): url = "http://services.tvrage.com/tools/quickinfo.php?" 
# if we need full info OR if we don't have a tvrage id, use show name if full == True or self.show.tvrid == 0: if self.show.tvrname != "" and self.show.tvrname != None: showName = self.show.tvrname else: showName = self.show.name urlData = {'show': showName.encode('utf-8')} # if we don't need full info and we have a tvrage id, use it else: urlData = {'sid': self.show.tvrid} if season != None and episode != None: urlData['ep'] = str(season) + 'x' + str(episode) # build the URL url += urllib.urlencode(urlData) logger.log(u"Loading TVRage info from URL: " + url, logger.DEBUG) result = helpers.getURL(url) if result is None: raise exceptions.TVRageException("urlopen call to " + url + " failed") else: result = result.decode('utf-8') urlData = result.splitlines() info = {} for x in urlData: if x.startswith("No Show Results Were Found"): logger.log(u"TVRage returned: " + x.encode('utf-8'), logger.WARNING) return info if "@" in x: key, value = x.split("@") if key: key = key.replace('<pre>', '') info[key] = value.strip() else: logger.log(u"TVRage returned: " + x.encode('utf-8'), logger.WARNING) return info # save it for later in case somebody is curious if 'Show ID' in info: self._tvrid = info['Show ID'] if 'Show Name' in info: self._tvrname = info['Show Name'] return info def _saveLatestInfo(self, info=None): if info == None: info = self._getTVRageInfo() if 'Next Episode' not in info or 'Latest Episode' not in info: raise exceptions.TVRageException("TVRage doesn't have all the required info for this show") self.lastEpInfo = self._getEpInfo(info['Latest Episode']) self.nextEpInfo = self._getEpInfo(info['Next Episode']) if self.lastEpInfo == None or self.nextEpInfo == None: raise exceptions.TVRageException("TVRage has malformed data, unable to update the show") def _getEpInfo(self, epString): month_dict = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6, "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12} logger.log(u"Parsing info from TVRage: " + 
epString, logger.DEBUG) ep_info = epString.split('^') num_info = [int(x) for x in ep_info[0].split('x')] date_info = ep_info[2] try: air_date = year = month = day = None date_info_list = date_info.split("/") year = date_info_list[2] if date_info_list[0] in month_dict: month = month_dict[date_info_list[0]] day = date_info_list[1] else: day = date_info_list[0] month = month_dict[date_info_list[1]] air_date = datetime.date(int(year), int(month), int(day)) except: air_date = None if not air_date: logger.log(u"Unable to figure out the time from the TVRage data " + ep_info[2]) return None toReturn = {'season': int(num_info[0]), 'episode': num_info[1], 'name': ep_info[1], 'airdate': air_date} logger.log(u"Result of parse: " + str(toReturn), logger.DEBUG) return toReturn def findLatestEp(self): # will use tvrage name if it got set in the constructor, or tvdb name if not info = self._getTVRageInfo(full=True) if not self.checkSync(info): raise exceptions.TVRageException("TVRage info isn't in sync with TVDB, not using data") myDB = db.DBConnection() # double check that it's not already in there sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? 
AND episode = ?", [self.show.tvdbid, self.nextEpInfo['season'], self.nextEpInfo['episode']]) if len(sqlResults) > 0: raise exceptions.TVRageException("Show is already in database, not adding the TVRage info") # insert it myDB.action("INSERT INTO tv_episodes (showid, tvdbid, name, season, episode, description, airdate, hasnfo, hastbn, status, location) VALUES (?,?,?,?,?,?,?,?,?,?,?)", \ [self.show.tvdbid, -1, self.nextEpInfo['name'], self.nextEpInfo['season'], self.nextEpInfo['episode'], '', self.nextEpInfo['airdate'].toordinal(), 0, 0, UNAIRED, '']) # once it's in the DB make an object and return it ep = None try: ep = self.show.getEpisode(self.nextEpInfo['season'], self.nextEpInfo['episode']) except exceptions.SickBeardException, e: logger.log(u"Unable to create episode from tvrage (could be for a variety of reasons): " + ex(e)) return ep
14,501
Python
.py
250
43.644
197
0.577825
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,331
processTV.py
midgetspy_Sick-Beard/sickbeard/processTV.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import shutil import time import sickbeard from sickbeard import common from sickbeard import postProcessor from sickbeard import db, helpers, exceptions from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex from sickbeard import logger def delete_folder(folder, check_empty=True): # check if it's a folder if not ek.ek(os.path.isdir, folder): return False # check if it isn't TV_DOWNLOAD_DIR if sickbeard.TV_DOWNLOAD_DIR: if helpers.real_path(folder) == helpers.real_path(sickbeard.TV_DOWNLOAD_DIR): return False # check if it's empty folder when wanted checked if check_empty: check_files = ek.ek(os.listdir, folder) if check_files: return False # try deleting folder try: logger.log(u"Deleting folder: " + folder) shutil.rmtree(folder) except (OSError, IOError), e: logger.log(u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING) return False return True def logHelper(logMessage, logLevel=logger.MESSAGE): logger.log(logMessage, logLevel) return logMessage + u"\n" def processDir(dirName, nzbName=None, method=None, recurse=False, pp_options={}): """ Scans through the files in dirName and processes whatever media files it finds dirName: The 
folder name to look in nzbName: The NZB name which resulted in this folder being downloaded method: The method of postprocessing: Automatic, Script, Manual recurse: Boolean for whether we should descend into subfolders or not """ returnStr = u"" returnStr += logHelper(u"Processing folder: " + dirName, logger.DEBUG) # if they passed us a real dir then assume it's the one we want if ek.ek(os.path.isdir, dirName): dirName = ek.ek(os.path.realpath, dirName) # if they've got a download dir configured then use it elif sickbeard.TV_DOWNLOAD_DIR and ek.ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR) \ and ek.ek(os.path.normpath, dirName) != ek.ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR): dirName = ek.ek(os.path.join, sickbeard.TV_DOWNLOAD_DIR, ek.ek(os.path.abspath, dirName).split(os.path.sep)[-1]) returnStr += logHelper(u"Trying to use folder: " + dirName, logger.DEBUG) # if we didn't find a real dir then quit if not ek.ek(os.path.isdir, dirName): returnStr += logHelper(u"Unable to figure out what folder to process. 
If your downloader and Sick Beard aren't on the same PC make sure you fill out your TV download dir in the config.", logger.DEBUG) return returnStr # TODO: check if it's failed and deal with it if it is if ek.ek(os.path.basename, dirName).startswith('_FAILED_'): returnStr += logHelper(u"The directory name indicates it failed to extract, cancelling", logger.DEBUG) return returnStr elif ek.ek(os.path.basename, dirName).startswith('_UNDERSIZED_'): returnStr += logHelper(u"The directory name indicates that it was previously rejected for being undersized, cancelling", logger.DEBUG) return returnStr elif ek.ek(os.path.basename, dirName).upper().startswith('_UNPACK'): returnStr += logHelper(u"The directory name indicates that this release is in the process of being unpacked, skipping", logger.DEBUG) return returnStr # make sure the dir isn't inside a show dir myDB = db.DBConnection() sqlResults = myDB.select("SELECT * FROM tv_shows") for sqlShow in sqlResults: if dirName.lower().startswith(ek.ek(os.path.realpath, sqlShow["location"]).lower() + os.sep) or dirName.lower() == ek.ek(os.path.realpath, sqlShow["location"]).lower(): returnStr += logHelper(u"You're trying to post process an existing show directory: " + dirName, logger.ERROR) returnStr += u"\n" return returnStr fileList = ek.ek(os.listdir, dirName) # split the list into video files and folders folders = filter(lambda x: ek.ek(os.path.isdir, ek.ek(os.path.join, dirName, x)), fileList) # videoFiles, sorted by size, process biggest file first. 
Leaves smaller same named file behind mediaFiles = filter(lambda x: ek.ek(os.path.exists, ek.ek(os.path.join, dirName, x)), filter(helpers.isMediaFile, fileList)) videoFiles = sorted(mediaFiles, key=lambda x: ek.ek(os.path.getsize, ek.ek(os.path.join, dirName, x)), reverse=True) remaining_video_files = list(videoFiles) num_videoFiles = len(videoFiles) # if there are no videofiles in parent and only one subfolder, pass the nzbName to child if num_videoFiles == 0 and len(folders) == 1: parent_nzbName = nzbName else: parent_nzbName = None # recursively process all the folders for cur_folder in folders: returnStr += u"\n" # use full path cur_folder = ek.ek(os.path.join, dirName, cur_folder) if helpers.is_hidden_folder(cur_folder): returnStr += logHelper(u"Ignoring hidden folder: " + cur_folder, logger.DEBUG) else: returnStr += logHelper(u"Recursively processing a folder: " + cur_folder, logger.DEBUG) returnStr += processDir(cur_folder, nzbName=parent_nzbName, recurse=True, method=method, pp_options=pp_options) remainingFolders = filter(lambda x: ek.ek(os.path.isdir, ek.ek(os.path.join, dirName, x)), fileList) if num_videoFiles == 0: returnStr += u"\n" returnStr += logHelper(u"There are no videofiles in folder: " + dirName, logger.DEBUG) # if there a no videofiles, try deleting empty folder if method != 'Manual': if delete_folder(dirName, check_empty=True): returnStr += logHelper(u"Deleted empty folder: " + dirName, logger.DEBUG) # if there's more than one videofile in the folder, files can be lost (overwritten) when nzbName contains only one episode. 
if num_videoFiles >= 2: nzbName = None # process any files in the dir for cur_video_file in videoFiles: cur_video_file_path = ek.ek(os.path.join, dirName, cur_video_file) if method == 'Automatic': # check if we processed this video file before cur_video_file_path_size = ek.ek(os.path.getsize, cur_video_file_path) myDB = db.DBConnection() search_sql = "SELECT tv_episodes.tvdbid, history.resource FROM tv_episodes INNER JOIN history ON history.showid=tv_episodes.showid" search_sql += " WHERE history.season=tv_episodes.season and history.episode=tv_episodes.episode" search_sql += " and tv_episodes.status IN (" + ",".join([str(x) for x in common.Quality.DOWNLOADED]) + ")" search_sql += " and history.resource LIKE ? and tv_episodes.file_size = ?" sql_results = myDB.select(search_sql, [cur_video_file_path, cur_video_file_path_size]) if len(sql_results): returnStr += logHelper(u"Ignoring file: " + cur_video_file_path + " looks like it's been processed already", logger.DEBUG) continue try: returnStr += u"\n" processor = postProcessor.PostProcessor(cur_video_file_path, nzb_name=nzbName, pp_options=pp_options) process_result = processor.process() process_fail_message = "" except exceptions.PostProcessingFailed, e: process_result = False process_fail_message = ex(e) except Exception, e: process_result = False process_fail_message = "Post Processor returned unhandled exception: " + ex(e) returnStr += processor.log # as long as the postprocessing was successful delete the old folder unless the config wants us not to if process_result: remaining_video_files.remove(cur_video_file) if not sickbeard.KEEP_PROCESSED_DIR and len(remaining_video_files) == 0 and len(remainingFolders) == 0: if delete_folder(dirName, check_empty=False): returnStr += logHelper(u"Deleted folder: " + dirName, logger.DEBUG) returnStr += logHelper(u"Processing succeeded for " + cur_video_file_path) else: returnStr += logHelper(u"Processing failed for " + cur_video_file_path + ": " + process_fail_message, 
logger.WARNING) return returnStr
9,409
Python
.py
163
48.92638
209
0.674907
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,332
nzbSplitter.py
midgetspy_Sick-Beard/sickbeard/nzbSplitter.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import xml.etree.cElementTree as etree import xml.etree import re from name_parser.parser import NameParser, InvalidNameException from sickbeard import logger, classes, helpers from sickbeard.common import Quality from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex def getSeasonNZBs(name, urlData, season): try: showXML = etree.ElementTree(etree.XML(urlData)) except SyntaxError: logger.log(u"Unable to parse the XML of " + name + ", not splitting it", logger.ERROR) return ({}, '') filename = name.replace(".nzb", "") nzbElement = showXML.getroot() regex = '([\w\._\ ]+)[\. ]S%02d[\. ]([\w\._\-\ ]+)[\- ]([\w_\-\ ]+?)' % season sceneNameMatch = re.search(regex, filename, re.I) if sceneNameMatch: showName, qualitySection, groupName = sceneNameMatch.groups() # @UnusedVariable else: logger.log(u"Unable to parse " + name + " into a scene name. 
If it's a valid one log a bug.", logger.ERROR) return ({}, '') regex = '(' + re.escape(showName) + '\.S%02d(?:[E0-9]+)\.[\w\._]+\-\w+' % season + ')' regex = regex.replace(' ', '.') epFiles = {} xmlns = None for curFile in nzbElement.getchildren(): xmlnsMatch = re.match("\{(http:\/\/[A-Za-z0-9_\.\/]+\/nzb)\}file", curFile.tag) if not xmlnsMatch: continue else: xmlns = xmlnsMatch.group(1) match = re.search(regex, curFile.get("subject"), re.I) if not match: #print curFile.get("subject"), "doesn't match", regex continue curEp = match.group(1) if curEp not in epFiles: epFiles[curEp] = [curFile] else: epFiles[curEp].append(curFile) return (epFiles, xmlns) def createNZBString(fileElements, xmlns): rootElement = etree.Element("nzb") if xmlns: rootElement.set("xmlns", xmlns) for curFile in fileElements: rootElement.append(stripNS(curFile, xmlns)) return xml.etree.ElementTree.tostring(rootElement, 'utf-8') def saveNZB(nzbName, nzbString): try: with ek.ek(open, nzbName + ".nzb", 'w') as nzb_fh: nzb_fh.write(nzbString) except EnvironmentError, e: logger.log(u"Unable to save NZB: " + ex(e), logger.ERROR) def stripNS(element, ns): element.tag = element.tag.replace("{" + ns + "}", "") for curChild in element.getchildren(): stripNS(curChild, ns) return element def splitResult(result): urlData = helpers.getURL(result.url) if urlData is None: logger.log(u"Unable to load url " + result.url + ", can't download season NZB", logger.ERROR) return False # parse the season ep name try: np = NameParser(False) parse_result = np.parse(result.name) except InvalidNameException: logger.log(u"Unable to parse the filename " + result.name + " into a valid episode", logger.WARNING) return False # bust it up season = parse_result.season_number if parse_result.season_number != None else 1 separateNZBs, xmlns = getSeasonNZBs(result.name, urlData, season) resultList = [] for newNZB in separateNZBs: logger.log(u"Split out " + newNZB + " from " + result.name, logger.DEBUG) # parse the name try: np = 
NameParser(False) parse_result = np.parse(newNZB) except InvalidNameException: logger.log(u"Unable to parse the filename " + newNZB + " into a valid episode", logger.WARNING) return False # make sure the result is sane if (parse_result.season_number != None and parse_result.season_number != season) or (parse_result.season_number == None and season != 1): logger.log(u"Found " + newNZB + " inside " + result.name + " but it doesn't seem to belong to the same season, ignoring it", logger.WARNING) continue elif len(parse_result.episode_numbers) == 0: logger.log(u"Found " + newNZB + " inside " + result.name + " but it doesn't seem to be a valid episode NZB, ignoring it", logger.WARNING) continue wantEp = True for epNo in parse_result.episode_numbers: if not result.extraInfo[0].wantEpisode(season, epNo, result.quality): logger.log(u"Ignoring result " + newNZB + " because we don't want an episode that is " + Quality.qualityStrings[result.quality], logger.DEBUG) wantEp = False break if not wantEp: continue # get all the associated episode objects epObjList = [] for curEp in parse_result.episode_numbers: epObjList.append(result.extraInfo[0].getEpisode(season, curEp)) # make a result curResult = classes.NZBDataSearchResult(epObjList) curResult.name = newNZB curResult.provider = result.provider curResult.quality = result.quality curResult.extraInfo = [createNZBString(separateNZBs[newNZB], xmlns)] resultList.append(curResult) return resultList
6,090
Python
.py
131
38.015267
159
0.639925
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,333
helpers.py
midgetspy_Sick-Beard/sickbeard/helpers.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import glob import gzip import os import re import shutil import socket import stat import StringIO import time import traceback import urllib2 import cookielib import sys if sys.version_info >= (2, 7, 9): import ssl import zlib from lib import MultipartPostHandler from httplib import BadStatusLine try: import json except ImportError: from lib import simplejson as json from xml.dom.minidom import Node try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree from sickbeard.exceptions import MultipleShowObjectsException, ex from sickbeard import logger from sickbeard.common import USER_AGENT, mediaExtensions from sickbeard import db from sickbeard import encodingKludge as ek from sickbeard.notifiers import synoindex_notifier # workaround for broken urllib2 in python 2.6.5: wrong credentials lead to an infinite recursion if sys.version_info >= (2, 6, 5) and sys.version_info < (2, 6, 6): class HTTPBasicAuthHandler(urllib2.HTTPBasicAuthHandler): def retry_http_basic_auth(self, host, req, realm): # don't retry if auth failed if req.get_header(self.auth_header, None) is not None: return None return urllib2.HTTPBasicAuthHandler.retry_http_basic_auth(self, 
host, req, realm) def indentXML(elem, level=0): ''' Does our pretty printing, makes Matt very happy ''' i = "\n" + level * " " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indentXML(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: # Strip out the newlines from text if elem.text: elem.text = elem.text.replace('\n', ' ') if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def remove_extension(name): """ Remove download or media extension from name (if any) """ if name and "." in name: base_name, sep, extension = name.rpartition('.') # @UnusedVariable if base_name and extension.lower() in ['nzb', 'torrent'] + mediaExtensions: name = base_name return name def remove_non_release_groups(name): """ Remove non release groups from name """ if name and "-" in name: name_group = name.rsplit('-', 1) if name_group[-1].upper() in ["RP", "NZBGEEK"]: name = name_group[0] return name def replaceExtension(filename, newExt): ''' >>> replaceExtension('foo.avi', 'mkv') 'foo.mkv' >>> replaceExtension('.vimrc', 'arglebargle') '.vimrc' >>> replaceExtension('a.b.c', 'd') 'a.b.d' >>> replaceExtension('', 'a') '' >>> replaceExtension('foo.bar', '') 'foo.' ''' sepFile = filename.rpartition(".") if sepFile[0] == "": return filename else: return sepFile[0] + "." 
+ newExt def isMediaFile(filename): # ignore samples if re.search('(^|[\W_])sample\d*[\W_]', filename.lower()): return False # ignore MAC OS's retarded "resource fork" files if filename.startswith('._'): return False sepFile = filename.rpartition(".") if sepFile[2].lower() in mediaExtensions: return True else: return False def sanitizeFileName(name): ''' >>> sanitizeFileName('a/b/c') 'a-b-c' >>> sanitizeFileName('abc') 'abc' >>> sanitizeFileName('a"b') 'ab' >>> sanitizeFileName('.a.b..') 'a.b' ''' # remove bad chars from the filename name = re.sub(r'[\\/\*]', '-', name) name = re.sub(r'[:"<>|?]', '', name) # remove leading/trailing periods and spaces name = name.strip(' .') return name def getURL(url, validate=False, cookies = cookielib.CookieJar(), password_mgr=None, throw_exc=False): """ Convenience method to directly retrieve the contents of a url """ obj = getURLFileLike(url, validate, cookies, password_mgr, throw_exc) if obj: return readURLFileLike(obj) else: return None def getURLFileLike(url, validate=False, cookies = cookielib.CookieJar(), password_mgr=None, throw_exc=False): """ Returns a file-like object same as returned by urllib2.urlopen but with Handlers configured for sickbeard. It allows for the use of cookies, multipart/form-data, https without certificate validation and both basic and digest HTTP authentication. In addition, the user-agent is set to the sickbeard default and accepts gzip and deflate encoding (which can be automatically handled when using readURL() to retrieve the contents) @param url: can be either a string or a Request object. @param validate: defines if SSL certificates should be validated on HTTPS connections @param cookies: is the cookielib.CookieJar in which cookies are stored. 
@param password_mgr: if given, should be something that is compatible with HTTPPasswordMgr @param throw_exc: throw the exception that was caught instead of None @return: the file-like object retrieved from the URL or None (or the exception) if it could not be retrieved """ # configure the OpenerDirector appropriately if not validate and sys.version_info >= (2, 7, 9): opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler.MultipartPostHandler, urllib2.HTTPSHandler(context=ssl._create_unverified_context()), urllib2.HTTPDigestAuthHandler(password_mgr), urllib2.HTTPBasicAuthHandler(password_mgr)) else: # Before python 2.7.9, there was no built-in way to validate SSL certificates # Since our default is not to validate, it is of low priority to make it available here if validate and sys.version_info < (2, 7, 9): logger.log(u"The SSL certificate will not be validated for " + url + "(python 2.7.9+ required)", logger.MESSAGE) opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler.MultipartPostHandler, urllib2.HTTPDigestAuthHandler(password_mgr), urllib2.HTTPBasicAuthHandler(password_mgr)) # set the default headers for every request opener.addheaders = [('User-Agent', USER_AGENT), ('Accept-Encoding', 'gzip,deflate')] try: return opener.open(url) except urllib2.HTTPError, e: logger.log(u"HTTP error " + str(e.code) + " while loading URL " + url, logger.WARNING) if throw_exc: raise else: return None except urllib2.URLError, e: logger.log(u"URL error " + str(e.reason) + " while loading URL " + url, logger.WARNING) if throw_exc: raise else: return None except BadStatusLine: logger.log(u"BadStatusLine error while loading URL " + url, logger.WARNING) if throw_exc: raise else: return None except socket.timeout: logger.log(u"Timed out while loading URL " + url, logger.WARNING) if throw_exc: raise else: return None except ValueError: logger.log(u"Unknown error while loading URL " + url, logger.WARNING) if throw_exc: 
raise else: return None except Exception: logger.log(u"Unknown exception while loading URL " + url + ": " + traceback.format_exc(), logger.WARNING) if throw_exc: raise else: return None def readURLFileLike(urlFileLike): """ Return the contents of the file like objects as string, performing decompression if necessary. @param urlFileLike: is a file like objects same as returned by urllib2.urlopen() and getURL() """ encoding = urlFileLike.info().get("Content-Encoding") if encoding in ('gzip', 'x-gzip', 'deflate'): content = urlFileLike.read() if encoding == 'deflate': data = StringIO.StringIO(zlib.decompress(content)) else: data = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(content)) result = data.read() else: result = urlFileLike.read() urlFileLike.close() return result; def is_hidden_folder(folder): """ Returns True if folder is hidden. On Linux based systems hidden folders start with . (dot) folder: Full path of folder to check """ if ek.ek(os.path.isdir, folder): if ek.ek(os.path.basename, folder).startswith('.'): return True return False def findCertainShow(showList, tvdbid): results = filter(lambda x: x.tvdbid == tvdbid, showList) if len(results) == 0: return None elif len(results) > 1: raise MultipleShowObjectsException() else: return results[0] def findCertainTVRageShow(showList, tvrid): if tvrid == 0: return None results = filter(lambda x: x.tvrid == tvrid, showList) if len(results) == 0: return None elif len(results) > 1: raise MultipleShowObjectsException() else: return results[0] def list_associated_files(file_path, base_name_only=False, filter_ext=""): """ For a given file path searches for files with the same name but different extension and returns their absolute paths file_path: The file to check for associated files base_name_only: False add extra '.' 
(conservative search) to file_path minus extension filter_ext: A comma separated string with extensions to include or empty string to include all matches Returns: A list containing all files which are associated to the given file """ if not file_path: return [] file_path_list = [] base_name = file_path.rpartition('.')[0] if not base_name_only: base_name = base_name + '.' # don't strip it all and use cwd by accident if not base_name: return [] # don't confuse glob with chars we didn't mean to use base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name) if filter_ext: # convert to tuple of extensions to restrict to filter_ext = tuple(x.lower().strip() for x in filter_ext.split(',')) for associated_file_path in ek.ek(glob.glob, base_name + '*'): # only add associated to list if associated_file_path == file_path: continue if ek.ek(os.path.isfile, associated_file_path): if filter_ext: if associated_file_path.lower().endswith(filter_ext): file_path_list.append(associated_file_path) else: file_path_list.append(associated_file_path) return file_path_list def makeDir(path): if not ek.ek(os.path.isdir, path): try: ek.ek(os.makedirs, path) # do the library update for synoindex synoindex_notifier.addFolder(path) except OSError: return False return True def searchDBForShow(regShowName): showNames = [re.sub('[. -]', ' ', regShowName)] myDB = db.DBConnection() yearRegex = "([^()]+?)\s*(\()?(\d{4})(?(2)\))$" for showName in showNames: sqlResults = myDB.select("SELECT * FROM tv_shows WHERE show_name LIKE ? OR tvr_name LIKE ?", [showName, showName]) if len(sqlResults) == 1: return (int(sqlResults[0]["tvdb_id"]), sqlResults[0]["show_name"]) else: # if we didn't get exactly one result then try again with the year stripped off if possible match = re.match(yearRegex, showName) if match and match.group(1): logger.log(u"Unable to match original name but trying to manually strip and specify show year", logger.DEBUG) sqlResults = myDB.select("SELECT * FROM tv_shows WHERE (show_name LIKE ? 
OR tvr_name LIKE ?) AND startyear = ?", [match.group(1) + '%', match.group(1) + '%', match.group(3)]) if len(sqlResults) == 0: logger.log(u"Unable to match a record in the DB for " + showName, logger.DEBUG) continue elif len(sqlResults) > 1: logger.log(u"Multiple results for " + showName + " in the DB, unable to match show name", logger.DEBUG) continue else: return (int(sqlResults[0]["tvdb_id"]), sqlResults[0]["show_name"]) return None def sizeof_fmt(num): ''' >>> sizeof_fmt(2) '2.0 bytes' >>> sizeof_fmt(1024) '1.0 KB' >>> sizeof_fmt(2048) '2.0 KB' >>> sizeof_fmt(2**20) '1.0 MB' >>> sizeof_fmt(1234567) '1.2 MB' ''' for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return "%3.1f %s" % (num, x) num /= 1024.0 def listMediaFiles(path): if not path or not ek.ek(os.path.isdir, path): return [] files = [] for curFile in ek.ek(os.listdir, path): fullCurFile = ek.ek(os.path.join, path, curFile) # if it's a folder do it recursively if ek.ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras': files += listMediaFiles(fullCurFile) elif isMediaFile(curFile): files.append(fullCurFile) return files def copyFile(srcFile, destFile): ek.ek(shutil.copyfile, srcFile, destFile) try: ek.ek(shutil.copymode, srcFile, destFile) except OSError: pass def moveFile(srcFile, destFile): try: ek.ek(os.rename, srcFile, destFile) fixSetGroupID(destFile) except OSError: copyFile(srcFile, destFile) ek.ek(os.unlink, srcFile) def make_dirs(path): """ Creates any folders that are missing and assigns them the permissions of their parents """ logger.log(u"Checking if the path " + path + " already exists", logger.DEBUG) if not ek.ek(os.path.isdir, path): # Windows, create all missing folders if os.name == 'nt' or os.name == 'ce': try: logger.log(u"Folder " + path + " didn't exist, creating it", logger.DEBUG) ek.ek(os.makedirs, path) except (OSError, IOError), e: logger.log(u"Failed creating " + path + " : " + ex(e), logger.ERROR) return False # not Windows, 
create all missing folders and set permissions else: sofar = '' folder_list = path.split(os.path.sep) # look through each subfolder and make sure they all exist for cur_folder in folder_list: sofar += cur_folder + os.path.sep # if it exists then just keep walking down the line if ek.ek(os.path.isdir, sofar): continue try: logger.log(u"Folder " + sofar + " didn't exist, creating it", logger.DEBUG) ek.ek(os.mkdir, sofar) # use normpath to remove end separator, otherwise checks permissions against itself chmodAsParent(ek.ek(os.path.normpath, sofar)) # do the library update for synoindex synoindex_notifier.addFolder(sofar) except (OSError, IOError), e: logger.log(u"Failed creating " + sofar + " : " + ex(e), logger.ERROR) return False return True def rename_ep_file(cur_path, new_path, old_path_length=0): """ Creates all folders needed to move a file to its new location, renames it, then cleans up any folders left that are now empty. cur_path: The absolute path to the file you want to move/rename new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION """ new_dest_dir, new_dest_name = os.path.split(new_path) # @UnusedVariable if old_path_length == 0 or old_path_length > len(cur_path): # approach from the right cur_file_name, cur_file_ext = os.path.splitext(cur_path) # @UnusedVariable else: # approach from the left cur_file_ext = cur_path[old_path_length:] # put the extension on the incoming file new_path += cur_file_ext make_dirs(os.path.dirname(new_path)) # move the file try: logger.log(u"Renaming file from " + cur_path + " to " + new_path) ek.ek(os.rename, cur_path, new_path) except (OSError, IOError), e: logger.log(u"Failed renaming " + cur_path + " to " + new_path + ": " + ex(e), logger.ERROR) return False # clean up any old folders that are empty delete_empty_folders(ek.ek(os.path.dirname, cur_path)) return True def delete_empty_folders(check_empty_dir, 
keep_dir=None): """ Walks backwards up the path and deletes any empty folders found. check_empty_dir: The path to clean (absolute path to a folder) keep_dir: Clean until this path is reached """ # treat check_empty_dir as empty when it only contains these items ignore_items = [] logger.log(u"Trying to clean any empty folders under " + check_empty_dir) # as long as the folder exists and doesn't contain any files, delete it while ek.ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir: check_files = ek.ek(os.listdir, check_empty_dir) if not check_files or (len(check_files) <= len(ignore_items) and all([check_file in ignore_items for check_file in check_files])): # directory is empty or contains only ignore_items try: logger.log(u"Deleting empty folder: " + check_empty_dir) # need shutil.rmtree when ignore_items is really implemented ek.ek(os.rmdir, check_empty_dir) # do the library update for synoindex synoindex_notifier.deleteFolder(check_empty_dir) except OSError, e: logger.log(u"Unable to delete " + check_empty_dir + ": " + repr(e) + " / " + str(e), logger.WARNING) break check_empty_dir = ek.ek(os.path.dirname, check_empty_dir) else: break def chmodAsParent(childPath): if os.name == 'nt' or os.name == 'ce': return parentPath = ek.ek(os.path.dirname, childPath) if not parentPath: logger.log(u"No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG) return parentPathStat = ek.ek(os.stat, parentPath) parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE]) childPathStat = ek.ek(os.stat, childPath) childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE]) if ek.ek(os.path.isfile, childPath): childMode = fileBitFilter(parentMode) else: childMode = parentMode if childPath_mode == childMode: return childPath_owner = childPathStat.st_uid user_id = os.geteuid() # @UndefinedVariable - only available on UNIX if user_id != 0 and user_id != childPath_owner: logger.log(u"Not running as root or owner of " + childPath + ", not 
trying to set permissions", logger.DEBUG) return try: ek.ek(os.chmod, childPath, childMode) logger.log(u"Setting permissions for %s to %o as parent directory has %o" % (childPath, childMode, parentMode), logger.DEBUG) except OSError: logger.log(u"Failed to set permission for %s to %o" % (childPath, childMode), logger.ERROR) def fileBitFilter(mode): for bit in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH, stat.S_ISUID, stat.S_ISGID]: if mode & bit: mode -= bit return mode def fixSetGroupID(childPath): if os.name == 'nt' or os.name == 'ce': return parentPath = ek.ek(os.path.dirname, childPath) parentStat = os.stat(parentPath) parentMode = stat.S_IMODE(parentStat[stat.ST_MODE]) if parentMode & stat.S_ISGID: parentGID = parentStat[stat.ST_GID] childStat = ek.ek(os.stat, childPath) childGID = childStat[stat.ST_GID] if childGID == parentGID: return childPath_owner = childStat.st_uid user_id = os.geteuid() # @UndefinedVariable - only available on UNIX if user_id != 0 and user_id != childPath_owner: logger.log(u"Not running as root or owner of " + childPath + ", not trying to set the set-group-ID", logger.DEBUG) return try: ek.ek(os.chown, childPath, -1, parentGID) # @UndefinedVariable - only available on UNIX logger.log(u"Respecting the set-group-ID bit on the parent directory for %s" % (childPath), logger.DEBUG) except OSError: logger.log(u"Failed to respect the set-group-ID bit on the parent directory for %s (setting group ID %i)" % (childPath, parentGID), logger.ERROR) def real_path(path): """ Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components. """ return ek.ek(os.path.normpath, ek.ek(os.path.normcase, ek.ek(os.path.realpath, path))) def sanitizeSceneName(name, ezrss=False): """ Takes a show name and returns the "scenified" version of it. ezrss: If true the scenified version will follow EZRSS's cracksmoker rules as best as possible Returns: A string containing the scene version of the show name given. 
""" if not ezrss: bad_chars = u",:()'!?\u2019" # ezrss leaves : and ! in their show names as far as I can tell else: bad_chars = u",()'?\u2019" # strip out any bad chars for x in bad_chars: name = name.replace(x, "") # tidy up stuff that doesn't belong in scene names name = name.replace("- ", ".").replace(" ", ".").replace("&", "and").replace('/', '.') name = re.sub("\.\.*", ".", name) if name.endswith('.'): name = name[:-1] return name def create_https_certificates(ssl_cert, ssl_key): """ Create self-signed HTTPS certificares and store in paths 'ssl_cert' and 'ssl_key' """ try: from OpenSSL import crypto # @UnresolvedImport from lib.certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, serial # @UnresolvedImport except: logger.log(u"pyopenssl module missing, please install for https access", logger.WARNING) return False # Create the CA Certificate cakey = createKeyPair(TYPE_RSA, 1024) careq = createCertRequest(cakey, CN='Certificate Authority') cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years cname = 'SickBeard' pkey = createKeyPair(TYPE_RSA, 1024) req = createCertRequest(pkey, CN=cname) cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years # Save the key and certificate to disk try: open(ssl_key, 'w').write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) open(ssl_cert, 'w').write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) except: logger.log(u"Error creating SSL key and certificate", logger.ERROR) return False return True if __name__ == '__main__': import doctest doctest.testmod() def parse_json(data): """ Parse json data into a python object data: data string containing json Returns: parsed data as json or None """ try: parsedJSON = json.loads(data) except ValueError, e: logger.log(u"Error trying to decode json data. 
Error: " + ex(e), logger.DEBUG) return None return parsedJSON def parse_xml(data, del_xmlns=False): """ Parse data into an xml elementtree.ElementTree data: data string containing xml del_xmlns: if True, removes xmlns namesspace from data before parsing Returns: parsed data as elementtree or None """ if del_xmlns: data = re.sub(' xmlns="[^"]+"', '', data) try: parsedXML = etree.fromstring(data) except Exception, e: logger.log(u"Error trying to parse xml data. Error: " + ex(e), logger.DEBUG) parsedXML = None return parsedXML def get_xml_text(element, mini_dom=False): """ Get all text inside a xml element element: A xml element either created with elementtree.ElementTree or xml.dom.minidom mini_dom: Default False use elementtree, True use minidom Returns: text """ text = "" if mini_dom: node = element for child in node.childNodes: if child.nodeType in (Node.CDATA_SECTION_NODE, Node.TEXT_NODE): text += child.data else: if element is not None: for child in [element] + element.findall('.//*'): if child.text: text += child.text return text.strip() def backupVersionedFile(old_file, version): numTries = 0 new_file = old_file + '.' + 'v' + str(version) while not ek.ek(os.path.isfile, new_file): if not ek.ek(os.path.isfile, old_file): logger.log(u"Not creating backup, " + old_file + " doesn't exist", logger.DEBUG) break try: logger.log(u"Trying to back up " + old_file + " to " + new_file, logger.DEBUG) shutil.copy(old_file, new_file) logger.log(u"Backup done", logger.DEBUG) break except Exception, e: logger.log(u"Error while trying to back up " + old_file + " to " + new_file + " : " + ex(e), logger.WARNING) numTries += 1 time.sleep(1) logger.log(u"Trying again.", logger.DEBUG) if numTries >= 10: logger.log(u"Unable to back up " + old_file + " to " + new_file + " please do it manually.", logger.ERROR) return False return True
27,942
Python
.py
640
34.41875
191
0.618948
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,334
searchCurrent.py
midgetspy_Sick-Beard/sickbeard/searchCurrent.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import sickbeard from sickbeard import search_queue import threading class CurrentSearcher(): def __init__(self): self.lock = threading.Lock() self.amActive = False def run(self): search_queue_item = search_queue.RSSSearchQueueItem() sickbeard.searchQueueScheduler.action.add_item(search_queue_item) #@UndefinedVariable
1,190
Python
.py
28
38.5
94
0.739965
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,335
sab.py
midgetspy_Sick-Beard/sickbeard/sab.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import httplib import datetime import sickbeard import urllib2 import StringIO try: import json except ImportError: from lib import simplejson as json from sickbeard import logger, helpers from sickbeard.exceptions import ex def sendNZB(nzb): """ Sends an NZB to SABnzbd via the API. nzb: The NZBSearchResult object to send to SAB """ # set up a dict with the URL params in it params = {} if sickbeard.SAB_USERNAME is not None: params['ma_username'] = sickbeard.SAB_USERNAME if sickbeard.SAB_PASSWORD is not None: params['ma_password'] = sickbeard.SAB_PASSWORD if sickbeard.SAB_APIKEY is not None: params['apikey'] = sickbeard.SAB_APIKEY if sickbeard.SAB_CATEGORY is not None: params['cat'] = sickbeard.SAB_CATEGORY # if it aired recently make it high priority for curEp in nzb.episodes: if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7): params['priority'] = 1 # if it's a normal result we just pass SAB the URL if nzb.resultType == "nzb": params['mode'] = 'addurl' params['name'] = nzb.url # if we get a raw data result we want to upload it to SAB elif nzb.resultType == "nzbdata": params['mode'] = 'addfile' multiPartParams = {"nzbfile": (nzb.name + ".nzb", nzb.extraInfo[0])} url = sickbeard.SAB_HOST + "api?" 
+ urllib.urlencode(params) logger.log(u"Sending NZB to SABnzbd: %s" % nzb.name) logger.log(u"SABnzbd URL: " + url, logger.DEBUG) try: # if we have the URL to an NZB then we've built up the SAB API URL already so just call it if nzb.resultType == "nzb": req = urllib2.Request(url) # if we are uploading the NZB data to SAB then we need to build a little POST form and send it elif nzb.resultType == "nzbdata": req = urllib2.Request(url,multiPartParams) result = helpers.getURL(req, throw_exc=True) except (EOFError, IOError), e: logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR) return False except httplib.InvalidURL, e: logger.log(u"Invalid SAB host, check your config: " + ex(e), logger.ERROR) return False # SAB shouldn't return a blank result, this most likely (but not always) means that it timed out and didn't receive the NZB if len(result) == 0: logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR) return False # massage the result a little bit sabText = result.strip() logger.log(u"Result text from SAB: " + sabText, logger.DEBUG) # do some crude parsing of the result text to determine what SAB said if sabText == "ok": logger.log(u"NZB sent to SAB successfully", logger.DEBUG) return True elif sabText == "Missing authentication": logger.log(u"Incorrect username/password sent to SAB, NZB not sent", logger.ERROR) return False else: logger.log(u"Unknown failure sending NZB to sab. 
Return text is: " + sabText, logger.ERROR) return False def _checkSabResponse(f): try: result = f.readlines() except Exception, e: logger.log(u"Error trying to get result from SAB" + ex(e), logger.ERROR) return False, "Error from SAB" if len(result) == 0: logger.log(u"No data returned from SABnzbd, NZB not sent", logger.ERROR) return False, "No data from SAB" sabText = result[0].strip() sabJson = {} try: sabJson = json.loads(sabText) except ValueError, e: pass if sabText == "Missing authentication": logger.log(u"Incorrect username/password sent to SAB", logger.ERROR) return False, "Incorrect username/password sent to SAB" elif 'error' in sabJson: logger.log(sabJson['error'], logger.ERROR) return False, sabJson['error'] else: return True, sabText def _sabURLOpenSimple(url): try: result = helpers.getURL(url, throw_exc=True) f = StringIO.StringIO(result) except (EOFError, IOError), e: logger.log(u"Unable to connect to SAB: " + ex(e), logger.ERROR) return False, "Unable to connect" except httplib.InvalidURL, e: logger.log(u"Invalid SAB host, check your config: " + ex(e), logger.ERROR) return False, "Invalid SAB host" if f is None: logger.log(u"No data returned from SABnzbd", logger.ERROR) return False, "No data returned from SABnzbd" else: return True, f def getSabAccesMethod(host=None, username=None, password=None, apikey=None): url = host + "api?mode=auth" result, f = _sabURLOpenSimple(url) if not result: return False, f result, sabText = _checkSabResponse(f) if not result: return False, sabText return True, sabText def testAuthentication(host=None, username=None, password=None, apikey=None): """ Sends a simple API request to SAB to determine if the given connection information is connect host: The host where SAB is running (incl port) username: The username to use for the HTTP request password: The password to use for the HTTP request apikey: The API key to provide to SAB Returns: A tuple containing the success boolean and a message """ # build up the URL parameters 
params = {} params['mode'] = 'queue' params['output'] = 'json' params['ma_username'] = username params['ma_password'] = password params['apikey'] = apikey url = host + "api?" + urllib.urlencode(params) # send the test request logger.log(u"SABnzbd test URL: " + url, logger.DEBUG) result, f = _sabURLOpenSimple(url) if not result: return False, f # check the result and determine if it's good or not result, sabText = _checkSabResponse(f) if not result: return False, sabText return True, "Success"
6,921
Python
.py
164
35.115854
128
0.662375
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,336
common.py
midgetspy_Sick-Beard/sickbeard/common.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import os.path import operator import platform import re from sickbeard import version USER_AGENT = 'Sick Beard/alpha2-' + version.SICKBEARD_VERSION.replace(' ', '-') + ' (' + platform.system() + ' ' + platform.release() + ')' mediaExtensions = ['avi', 'mkv', 'mpg', 'mpeg', 'wmv', 'ogm', 'mp4', 'iso', 'img', 'divx', 'm2ts', 'm4v', 'ts', 'flv', 'f4v', 'mov', 'rmvb', 'vob', 'dvr-ms', 'wtv', 'ogv', '3gp', 'webm'] ### Other constants MULTI_EP_RESULT = -1 SEASON_RESULT = -2 ### Notification Types NOTIFY_SNATCH = 1 NOTIFY_DOWNLOAD = 2 notifyStrings = {} notifyStrings[NOTIFY_SNATCH] = "Started Download" notifyStrings[NOTIFY_DOWNLOAD] = "Download Finished" ### Episode statuses UNKNOWN = -1 # should never happen UNAIRED = 1 # episodes that haven't aired yet SNATCHED = 2 # qualified with quality WANTED = 3 # episodes we don't have but want to get DOWNLOADED = 4 # qualified with quality SKIPPED = 5 # episodes we don't want ARCHIVED = 6 # episodes that you don't have locally (counts toward download completion stats) IGNORED = 7 # episodes that you don't want included in your download stats SNATCHED_PROPER = 9 # qualified with quality NAMING_REPEAT = 1 NAMING_EXTEND = 2 NAMING_DUPLICATE = 4 NAMING_LIMITED_EXTEND = 8 NAMING_SEPARATED_REPEAT = 16 
NAMING_LIMITED_EXTEND_E_PREFIXED = 32 multiEpStrings = {} multiEpStrings[NAMING_REPEAT] = "Repeat" multiEpStrings[NAMING_SEPARATED_REPEAT] = "Repeat (Separated)" multiEpStrings[NAMING_DUPLICATE] = "Duplicate" multiEpStrings[NAMING_EXTEND] = "Extend" multiEpStrings[NAMING_LIMITED_EXTEND] = "Extend (Limited)" multiEpStrings[NAMING_LIMITED_EXTEND_E_PREFIXED] = "Extend (Limited, E-prefixed)" class Quality: NONE = 0 # 0 SDTV = 1 # 1 SDDVD = 1 << 1 # 2 HDTV = 1 << 2 # 4 RAWHDTV = 1 << 3 # 8 -- 720p/1080i mpeg2 (trollhd releases) FULLHDTV = 1 << 4 # 16 -- 1080p HDTV (QCF releases) HDWEBDL = 1 << 5 # 32 FULLHDWEBDL = 1 << 6 # 64 -- 1080p web-dl HDBLURAY = 1 << 7 # 128 FULLHDBLURAY = 1 << 8 # 256 # put these bits at the other end of the spectrum, far enough out that they shouldn't interfere UNKNOWN = 1 << 15 # 32768 qualityStrings = {NONE: "N/A", UNKNOWN: "Unknown", SDTV: "SD TV", SDDVD: "SD DVD", HDTV: "HD TV", RAWHDTV: "RawHD TV", FULLHDTV: "1080p HD TV", HDWEBDL: "720p WEB-DL", FULLHDWEBDL: "1080p WEB-DL", HDBLURAY: "720p BluRay", FULLHDBLURAY: "1080p BluRay"} statusPrefixes = {DOWNLOADED: "Downloaded", SNATCHED: "Snatched"} @staticmethod def _getStatusStrings(status): toReturn = {} for x in Quality.qualityStrings.keys(): toReturn[Quality.compositeStatus(status, x)] = Quality.statusPrefixes[status] + " (" + Quality.qualityStrings[x] + ")" return toReturn @staticmethod def combineQualities(anyQualities, bestQualities): anyQuality = 0 bestQuality = 0 if anyQualities: anyQuality = reduce(operator.or_, anyQualities) if bestQualities: bestQuality = reduce(operator.or_, bestQualities) return anyQuality | (bestQuality << 16) @staticmethod def splitQuality(quality): anyQualities = [] bestQualities = [] for curQual in Quality.qualityStrings.keys(): if curQual & quality: anyQualities.append(curQual) if curQual << 16 & quality: bestQualities.append(curQual) return (sorted(anyQualities), sorted(bestQualities)) @staticmethod def nameQuality(name): name = os.path.basename(name) # if 
we have our exact text then assume we put it there for x in sorted(Quality.qualityStrings, reverse=True): if x == Quality.UNKNOWN: continue regex = '\W' + Quality.qualityStrings[x].replace(' ', '\W') + '\W' regex_match = re.search(regex, name, re.I) if regex_match: return x checkName = lambda namelist, func: func([re.search(x, name, re.I) for x in namelist]) if checkName(["(pdtv|hdtv|dsr|tvrip).(xvid|x264)"], all) and not checkName(["(720|1080)[pi]"], all) and not checkName(["hr.ws.pdtv.x264"], any): return Quality.SDTV elif checkName(["web.dl|webrip", "xvid|x264|h.?264"], all) and not checkName(["(720|1080)[pi]"], all): return Quality.SDTV elif checkName(["(dvdrip|bdrip)(.ws)?.(xvid|divx|x264)"], any) and not checkName(["(720|1080)[pi]"], all): return Quality.SDDVD elif checkName(["720p", "hdtv", "x264"], all) or checkName(["hr.ws.pdtv.x264"], any) and not checkName(["(1080)[pi]"], all): return Quality.HDTV elif checkName(["720p|1080i", "hdtv", "mpeg-?2"], all) or checkName(["1080[pi].hdtv", "h.?264"], all): return Quality.RAWHDTV elif checkName(["1080p", "hdtv", "x264"], all): return Quality.FULLHDTV elif checkName(["720p", "web.dl|webrip"], all) or checkName(["720p", "itunes", "h.?264"], all): return Quality.HDWEBDL elif checkName(["1080p", "web.dl|webrip"], all) or checkName(["1080p", "itunes", "h.?264"], all): return Quality.FULLHDWEBDL elif checkName(["720p", "bluray|hddvd", "x264"], all): return Quality.HDBLURAY elif checkName(["1080p", "bluray|hddvd", "x264"], all): return Quality.FULLHDBLURAY else: return Quality.UNKNOWN @staticmethod def assumeQuality(name): if name.lower().endswith((".avi", ".mp4")): return Quality.SDTV elif name.lower().endswith(".mkv"): return Quality.HDTV elif name.lower().endswith(".ts"): return Quality.RAWHDTV else: return Quality.UNKNOWN @staticmethod def compositeStatus(status, quality): return status + 100 * quality @staticmethod def qualityDownloaded(status): return (status - DOWNLOADED) / 100 @staticmethod def 
splitCompositeStatus(status): """Returns a tuple containing (status, quality)""" if status == UNKNOWN: return (UNKNOWN, Quality.UNKNOWN) for x in sorted(Quality.qualityStrings.keys(), reverse=True): if status > x * 100: return (status - x * 100, x) return (status, Quality.NONE) @staticmethod def statusFromName(name, assume=True): quality = Quality.nameQuality(name) if assume and quality == Quality.UNKNOWN: quality = Quality.assumeQuality(name) return Quality.compositeStatus(DOWNLOADED, quality) DOWNLOADED = None SNATCHED = None SNATCHED_PROPER = None Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in Quality.qualityStrings.keys()] Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in Quality.qualityStrings.keys()] Quality.SNATCHED_PROPER = [Quality.compositeStatus(SNATCHED_PROPER, x) for x in Quality.qualityStrings.keys()] SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], []) HD = Quality.combineQualities([Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY], []) # HD720p + HD1080p HD720p = Quality.combineQualities([Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], []) HD1080p = Quality.combineQualities([Quality.FULLHDTV, Quality.FULLHDWEBDL, Quality.FULLHDBLURAY], []) ANY = Quality.combineQualities([Quality.SDTV, Quality.SDDVD, Quality.HDTV, Quality.FULLHDTV, Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY, Quality.UNKNOWN], []) # SD + HD qualityPresets = (SD, HD, HD720p, HD1080p, ANY) qualityPresetStrings = {SD: "SD", HD: "HD", HD720p: "HD720p", HD1080p: "HD1080p", ANY: "Any"} class StatusStrings: def __init__(self): self.statusStrings = {UNKNOWN: "Unknown", UNAIRED: "Unaired", SNATCHED: "Snatched", DOWNLOADED: "Downloaded", SKIPPED: "Skipped", SNATCHED_PROPER: "Snatched (Proper)", WANTED: "Wanted", ARCHIVED: "Archived", IGNORED: "Ignored"} def __getitem__(self, name): if name in Quality.DOWNLOADED + Quality.SNATCHED + 
Quality.SNATCHED_PROPER: status, quality = Quality.splitCompositeStatus(name) if quality == Quality.NONE: return self.statusStrings[status] else: return self.statusStrings[status] + " (" + Quality.qualityStrings[quality] + ")" else: return self.statusStrings[name] def has_key(self, name): return name in self.statusStrings or name in Quality.DOWNLOADED or name in Quality.SNATCHED or name in Quality.SNATCHED_PROPER statusStrings = StatusStrings() class Overview: UNAIRED = UNAIRED # 1 QUAL = 2 WANTED = WANTED # 3 GOOD = 4 SKIPPED = SKIPPED # 5 # For both snatched statuses. Note: SNATCHED/QUAL have same value and break dict. SNATCHED = SNATCHED_PROPER # 9 overviewStrings = {SKIPPED: "skipped", WANTED: "wanted", QUAL: "qual", GOOD: "good", UNAIRED: "unaired", SNATCHED: "snatched"} # Get our xml namespaces correct for lxml XML_NSMAP = {'xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'xsd': 'http://www.w3.org/2001/XMLSchema'} countryList = {'Australia': 'AU', 'Canada': 'CA', 'USA': 'US' }
11,290
Python
.py
237
37.447257
204
0.596587
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,337
image_cache.py
midgetspy_Sick-Beard/sickbeard/image_cache.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import os.path import sickbeard from sickbeard import helpers, logger, exceptions from sickbeard import encodingKludge as ek from sickbeard.metadata.generic import GenericMetadata from lib.hachoir_parser import createParser from lib.hachoir_metadata import extractMetadata class ImageCache: def __init__(self): pass def _cache_dir(self): """ Builds up the full path to the image cache directory """ return ek.ek(os.path.abspath, ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images')) def poster_path(self, tvdb_id): """ Builds up the path to a poster cache for a given tvdb id returns: a full path to the cached poster file for the given tvdb id tvdb_id: ID of the show to use in the file name """ poster_file_name = str(tvdb_id) + '.poster.jpg' return ek.ek(os.path.join, self._cache_dir(), poster_file_name) def banner_path(self, tvdb_id): """ Builds up the path to a banner cache for a given tvdb id returns: a full path to the cached banner file for the given tvdb id tvdb_id: ID of the show to use in the file name """ banner_file_name = str(tvdb_id) + '.banner.jpg' return ek.ek(os.path.join, self._cache_dir(), banner_file_name) def has_poster(self, tvdb_id): """ Returns true if a cached poster exists for the given tvdb id """ poster_path = 
self.poster_path(tvdb_id) logger.log(u"Checking if file "+str(poster_path)+" exists", logger.DEBUG) return ek.ek(os.path.isfile, poster_path) def has_banner(self, tvdb_id): """ Returns true if a cached banner exists for the given tvdb id """ banner_path = self.banner_path(tvdb_id) logger.log(u"Checking if file "+str(banner_path)+" exists", logger.DEBUG) return ek.ek(os.path.isfile, banner_path) BANNER = 1 POSTER = 2 def which_type(self, path): """ Analyzes the image provided and attempts to determine whether it is a poster or banner. returns: BANNER, POSTER if it concluded one or the other, or None if the image was neither (or didn't exist) path: full path to the image """ if not ek.ek(os.path.isfile, path): logger.log(u"Couldn't check the type of "+str(path)+" cause it doesn't exist", logger.WARNING) return None # use hachoir to parse the image for us img_parser = createParser(path) img_metadata = extractMetadata(img_parser) if not img_metadata: logger.log(u"Unable to get metadata from "+str(path)+", not using your existing image", logger.DEBUG) return None img_ratio = float(img_metadata.get('width'))/float(img_metadata.get('height')) img_parser.stream._input.close() # most posters are around 0.68 width/height ratio (eg. 680/1000) if 0.55 < img_ratio < 0.8: return self.POSTER # most banners are around 5.4 width/height ratio (eg. 
758/140) elif 5 < img_ratio < 6: return self.BANNER else: logger.log(u"Image has size ratio of "+str(img_ratio)+", unknown type", logger.WARNING) return None def _cache_image_from_file(self, image_path, img_type, tvdb_id): """ Takes the image provided and copies it to the cache folder returns: bool representing success image_path: path to the image we're caching img_type: BANNER or POSTER tvdb_id: id of the show this image belongs to """ # generate the path based on the type & tvdb_id if img_type == self.POSTER: dest_path = self.poster_path(tvdb_id) elif img_type == self.BANNER: dest_path = self.banner_path(tvdb_id) else: logger.log(u"Invalid cache image type: "+str(img_type), logger.ERROR) return False # make sure the cache folder exists before we try copying to it if not ek.ek(os.path.isdir, self._cache_dir()): logger.log(u"Image cache dir didn't exist, creating it at "+str(self._cache_dir())) ek.ek(os.makedirs, self._cache_dir()) logger.log(u"Copying from "+image_path+" to "+dest_path) helpers.copyFile(image_path, dest_path) return True def _cache_image_from_tvdb(self, show_obj, img_type): """ Retrieves an image of the type specified from TVDB and saves it to the cache folder returns: bool representing success show_obj: TVShow object that we want to cache an image for img_type: BANNER or POSTER """ # generate the path based on the type & tvdb_id if img_type == self.POSTER: img_type_name = 'poster' dest_path = self.poster_path(show_obj.tvdbid) elif img_type == self.BANNER: img_type_name = 'banner' dest_path = self.banner_path(show_obj.tvdbid) else: logger.log(u"Invalid cache image type: "+str(img_type), logger.ERROR) return False # retrieve the image from TVDB using the generic metadata class #TODO: refactor metadata_generator = GenericMetadata() img_data = metadata_generator._retrieve_show_image(img_type_name, show_obj) result = metadata_generator._write_image(img_data, dest_path) return result def fill_cache(self, show_obj): """ Caches all images for the given 
show. Copies them from the show dir if possible, or downloads them from TVDB if they aren't in the show dir. show_obj: TVShow object to cache images for """ logger.log(u"Checking if we need any cache images for show "+str(show_obj.tvdbid), logger.DEBUG) # check if the images are already cached or not need_images = {self.POSTER: not self.has_poster(show_obj.tvdbid), self.BANNER: not self.has_banner(show_obj.tvdbid), } if not need_images[self.POSTER] and not need_images[self.BANNER]: logger.log(u"No new cache images needed, not retrieving new ones") return # check the show dir for images and use them try: for cur_provider in sickbeard.metadata_provider_dict.values(): logger.log(u"Checking if we can use the show image from the "+cur_provider.name+" metadata", logger.DEBUG) if ek.ek(os.path.isfile, cur_provider.get_poster_path(show_obj)): cur_file_name = os.path.abspath(cur_provider.get_poster_path(show_obj)) cur_file_type = self.which_type(cur_file_name) if cur_file_type == None: logger.log(u"Unable to retrieve image type, not using the image from "+str(cur_file_name), logger.WARNING) continue logger.log(u"Checking if image "+cur_file_name+" (type "+str(cur_file_type)+" needs metadata: "+str(need_images[cur_file_type]), logger.DEBUG) if cur_file_type in need_images and need_images[cur_file_type]: logger.log(u"Found an image in the show dir that doesn't exist in the cache, caching it: "+cur_file_name+", type "+str(cur_file_type), logger.DEBUG) self._cache_image_from_file(cur_file_name, cur_file_type, show_obj.tvdbid) need_images[cur_file_type] = False except exceptions.ShowDirNotFoundException: logger.log(u"Unable to search for images in show dir because it doesn't exist", logger.WARNING) # download from TVDB for missing ones for cur_image_type in [self.POSTER, self.BANNER]: logger.log(u"Seeing if we still need an image of type "+str(cur_image_type)+": "+str(need_images[cur_image_type]), logger.DEBUG) if cur_image_type in need_images and need_images[cur_image_type]: 
self._cache_image_from_tvdb(show_obj, cur_image_type) logger.log(u"Done cache check")
9,346
Python
.py
173
41.942197
173
0.627411
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,338
slack.py
midgetspy_Sick-Beard/sickbeard/notifiers/slack.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import json import urllib import urllib2 import sickbeard from sickbeard import logger, common class SlackNotifier: def _send_to_slack(self, message, accessToken, channel, bot_name, icon_url): SLACK_ENDPOINT = "https://slack.com/api/chat.postMessage" data = {} data["token"] = accessToken data["channel"] = channel data["username"] = bot_name data["text"] = message data["icon_url"] = icon_url encoded_data = urllib.urlencode(data) req = urllib2.Request(SLACK_ENDPOINT, encoded_data) urlResp = sickbeard.helpers.getURL(req) if urlResp: resp = json.loads(urlResp) else: return False # if ("error" in resp): # raise Exception(resp["error"]) if (resp["ok"] == True): logger.log(u"Slack: Succeeded sending message.", logger.MESSAGE) return True logger.log(u"Slack: Failed sending message: " + resp["error"], logger.ERROR) return False def _notify(self, message, accessToken='', channel='', bot_name='', icon_url='', force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_SLACK and not force: return False if not accessToken: accessToken = sickbeard.SLACK_ACCESS_TOKEN if not channel: channel = sickbeard.SLACK_CHANNEL if not bot_name: bot_name = sickbeard.SLACK_BOT_NAME if not icon_url: 
icon_url = sickbeard.SLACK_ICON_URL return self._send_to_slack(message, accessToken, channel, bot_name, icon_url) ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.SLACK_NOTIFY_ONSNATCH: self._notify(common.notifyStrings[common.NOTIFY_SNATCH] + ': ' + ep_name) def notify_download(self, ep_name): if sickbeard.SLACK_NOTIFY_ONDOWNLOAD: self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD] + ': ' + ep_name) def test_notify(self, accessToken, channel, bot_name, icon_url): return self._notify("This is a test notification from Sick Beard", accessToken, channel, bot_name, icon_url, force=True) def update_library(self, ep_obj): pass notifier = SlackNotifier
3,239
Python
.py
72
38.444444
128
0.641042
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,339
growl.py
midgetspy_Sick-Beard/sickbeard/notifiers/growl.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import socket import sickbeard from sickbeard import logger, common from sickbeard.exceptions import ex from lib.growl import gntp class GrowlNotifier: def _send_growl(self, options, message=None): # send notification notice = gntp.GNTPNotice() #Required notice.add_header('Application-Name', options['app']) notice.add_header('Notification-Name', options['name']) notice.add_header('Notification-Title', options['title']) if options['password']: notice.set_password(options['password']) # optional if options['sticky']: notice.add_header('Notification-Sticky', options['sticky']) if options['priority']: notice.add_header('Notification-Priority', options['priority']) if options['icon']: notice.add_header('Notification-Icon', 'http://www.sickbeard.com/notify.png') if message: notice.add_header('Notification-Text', message) response = self._send(options['host'], options['port'], notice.encode(), options['debug']) if isinstance(response, gntp.GNTPOK): return True return False def _send(self, host, port, data, debug=False): if debug: print '<Sending>\n', data, '\n</Sending>' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) s.send(data) response = gntp.parse_gntp(s.recv(1024)) s.close() if debug: print 
'<Recieved>\n', response, '\n</Recieved>' return response def _notify(self, title="Sick Beard Notification", message=None, name=None, host=None, password=None, force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_GROWL and not force: return False # fill in omitted parameters if not name: name = title if not host: hostParts = sickbeard.GROWL_HOST.split(':') else: hostParts = host.split(':') if len(hostParts) != 2 or hostParts[1] == '': port = 23053 else: port = int(hostParts[1]) growlHosts = [(hostParts[0], port)] opts = {} opts['name'] = name opts['title'] = title opts['app'] = 'SickBeard' opts['sticky'] = None opts['priority'] = None opts['debug'] = False if not password: opts['password'] = sickbeard.GROWL_PASSWORD else: opts['password'] = password opts['icon'] = True # TODO: Multi hosts does not seem to work... registration only happens for the first for pc in growlHosts: print pc opts['host'] = pc[0] opts['port'] = pc[1] logger.log(u"GROWL: Sending message '" + message + "' to " + opts['host'] + ":" + str(opts['port']), logger.DEBUG) try: return self._send_growl(opts, message) except Exception, e: logger.log(u"GROWL: Unable to send growl to " + opts['host'] + ":" + str(opts['port']) + " - " + ex(e), logger.WARNING) return False def _sendRegistration(self, host=None, password=None, name="Sick Beard Notification"): opts = {} if not host: hostParts = sickbeard.GROWL_HOST.split(':') else: hostParts = host.split(':') if len(hostParts) != 2 or hostParts[1] == '': port = 23053 else: port = int(hostParts[1]) opts['host'] = hostParts[0] opts['port'] = port if not password: opts['password'] = sickbeard.GROWL_PASSWORD else: opts['password'] = password opts['app'] = 'SickBeard' opts['debug'] = False # send registration register = gntp.GNTPRegister() register.add_header('Application-Name', opts['app']) register.add_header('Application-Icon', 'http://www.sickbeard.com/notify.png') 
register.add_notification('Test', True) register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True) register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True) if opts['password']: register.set_password(opts['password']) try: return self._send(opts['host'], opts['port'], register.encode(), opts['debug']) except Exception, e: logger.log(u"GROWL: Unable to send growl to " + opts['host'] + ":" + str(opts['port']) + " - " + ex(e), logger.WARNING) return False ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.GROWL_NOTIFY_ONSNATCH: self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.GROWL_NOTIFY_ONDOWNLOAD: self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name) def test_notify(self, host, password): result = self._sendRegistration(host, password, "Test") if result: return self._notify("Test Growl", "This is a test notification from Sick Beard", "Test", host, password, force=True) else: return result def update_library(self, ep_obj=None): pass notifier = GrowlNotifier
6,346
Python
.py
145
35.013793
135
0.599968
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,340
pushover.py
midgetspy_Sick-Beard/sickbeard/notifiers/pushover.py
# Author: Marvin Pinto <[email protected]> # Author: Dennis Lutter <[email protected]> # Author: Aaron Bieber <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import urllib2 import time import sickbeard from sickbeard import logger from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD from sickbeard.exceptions import ex API_URL = "https://api.pushover.net/1/messages.json" API_KEY = "OKCXmkvHN1syU2e8xvpefTnyvVWGv5" DEVICE_URL = "https://api.pushover.net/1/users/validate.json" class PushoverNotifier: def get_devices(self, userKey=None): # fill in omitted parameters if not userKey: userKey = sickbeard.PUSHOVER_USERKEY data = urllib.urlencode({ 'token': API_KEY, 'user': userKey }) # get devices from pushover req = urllib2.Request(DEVICE_URL, data) return sickbeard.helpers.getURL(req) def _sendPushover(self, title, msg, userKey, priority, device, sound): # build up the URL and parameters msg = msg.strip() data = urllib.urlencode({ 'token': API_KEY, 'title': title, 'user': userKey, 'message': msg.encode('utf-8'), 'priority': priority, 'device': device, 'sound': sound, 'timestamp': int(time.time()) }) # send the request to pushover try: req = urllib2.Request(API_URL, data) handle = sickbeard.helpers.getURLFileLike(req, throw_exc=True) handle.close() except 
urllib2.URLError, e: # FIXME: Python 2.5 hack, it wrongly reports 201 as an error if hasattr(e, 'code') and e.code == 201: logger.log(u"PUSHOVER: Notification successful.", logger.MESSAGE) return True # if we get an error back that doesn't have an error code then who knows what's really happening if not hasattr(e, 'code'): logger.log(u"PUSHOVER: Notification failed." + ex(e), logger.ERROR) return False else: logger.log(u"PUSHOVER: Notification failed. Error code: " + str(e.code), logger.ERROR) # HTTP status 404 if the provided email address isn't a Pushover user. if e.code == 404: logger.log(u"PUSHOVER: Username is wrong/not a Pushover email. Pushover will send an email to it", logger.WARNING) return False # For HTTP status code 401's, it is because you are passing in either an invalid token, or the user has not added your service. elif e.code == 401: # HTTP status 401 if the user doesn't have the service added subscribeNote = self._sendPushover(title, msg, userKey) if subscribeNote: logger.log(u"PUSHOVER: Subscription sent", logger.DEBUG) return True else: logger.log(u"PUSHOVER: Subscription could not be sent", logger.ERROR) return False # If you receive an HTTP status code of 400, it is because you failed to send the proper parameters elif e.code == 400: logger.log(u"PUSHOVER: Wrong data sent to Pushover", logger.ERROR) return False logger.log(u"PUSHOVER: Notification successful.", logger.MESSAGE) return True def _notify(self, title, message, userKey=None, priority=None, device=None, sound=None, force=False): """ Sends a pushover notification based on the provided info or SB config """ # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_PUSHOVER and not force: return False # fill in omitted parameters if not userKey: userKey = sickbeard.PUSHOVER_USERKEY if not priority: priority = sickbeard.PUSHOVER_PRIORITY if not device: device = sickbeard.PUSHOVER_DEVICE if not sound: sound = sickbeard.PUSHOVER_SOUND 
logger.log(u"PUSHOVER: Sending notice with details: title=\"%s\", message=\"%s\", userkey=%s, priority=%s, device=%s, sound=%s" % (title, message, userKey, priority, device, sound), logger.DEBUG) return self._sendPushover(title, message, userKey, priority, device, sound) ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.PUSHOVER_NOTIFY_ONSNATCH: self._notify(notifyStrings[NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD: self._notify(notifyStrings[NOTIFY_DOWNLOAD], ep_name) def test_notify(self, userKey, priority, device, sound): return self._notify("Test", "This is a test notification from Sick Beard", userKey, priority, device, sound, force=True) def update_library(self, ep_obj=None): pass notifier = PushoverNotifier
5,927
Python
.py
122
39.434426
203
0.63013
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,341
nmj.py
midgetspy_Sick-Beard/sickbeard/notifiers/nmj.py
# Author: Nico Berlee http://nico.berlee.nl/ # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import urllib2 import sickbeard import telnetlib import re from sickbeard import logger from sickbeard.exceptions import ex try: import xml.etree.cElementTree as etree except ImportError: import xml.etree.ElementTree as etree class NMJNotifier: def notify_settings(self, host): """ Retrieves the settings from a NMJ/Popcorn Hour host: The hostname/IP of the Popcorn Hour server Returns: True if the settings were retrieved successfully, False otherwise """ # establish a terminal session to the PC terminal = False try: terminal = telnetlib.Telnet(host) except Exception: logger.log(u"NMJ: Unable to get a telnet session to %s" % (host), logger.WARNING) return False # tell the terminal to output the necessary info to the screen so we can search it later logger.log(u"NMJ: Connected to %s via telnet" % (host), logger.DEBUG) terminal.read_until("sh-3.00# ") terminal.write("cat /tmp/source\n") terminal.write("cat /tmp/netshare\n") terminal.write("exit\n") tnoutput = terminal.read_all() database = "" device = "" match = re.search(r"(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)", tnoutput) # if we found the database in the terminal output then save that database to the config if match: database = match.group(1) device = 
match.group(2) logger.log(u"NMJ: Found NMJ database %s on device %s" % (database, device), logger.DEBUG) sickbeard.NMJ_DATABASE = database else: logger.log(u"NMJ: Could not get current NMJ database on %s, NMJ is probably not running!" % (host), logger.WARNING) return False # if the device is a remote host then try to parse the mounting URL and save it to the config if device.startswith("NETWORK_SHARE/"): match = re.search(".*(?=\r\n?%s)" % (re.escape(device[14:])), tnoutput) if match: mount = match.group().replace("127.0.0.1", host) logger.log(u"NMJ: Found mounting url on the Popcorn Hour in configuration: %s" % (mount), logger.DEBUG) sickbeard.NMJ_MOUNT = mount else: logger.log(u"NMJ: Detected a network share on the Popcorn Hour, but could not get the mounting url", logger.WARNING) return False return True def _sendNMJ(self, host, database, mount=None): """ Sends a NMJ update command to the specified machine host: The hostname/IP to send the request to (no port) database: The database to send the request to mount: The mount URL to use (optional) Returns: True if the request succeeded, False otherwise """ # if a mount URL is provided then attempt to open a handle to that URL if mount: try: req = urllib2.Request(mount) logger.log(u"NMJ: Try to mount network drive via url: %s" % (mount), logger.DEBUG) sickbeard.helpers.getURLFileLike(req) except IOError, e: if hasattr(e, 'reason'): logger.log(u"NMJ: Could not contact Popcorn Hour on host %s: %s" % (host, e.reason), logger.WARNING) elif hasattr(e, 'code'): logger.log(u"NMJ: Problem with Popcorn Hour on host %s: %s" % (host, e.code), logger.WARNING) return False except Exception, e: logger.log(u"NMJ: Unknown exception: " + ex(e), logger.ERROR) return False # build up the request URL and parameters UPDATE_URL = "http://%(host)s:8008/metadata_database?%(params)s" params = { "arg0": "scanner_start", "arg1": database, "arg2": "background", "arg3": "" } params = urllib.urlencode(params) updateUrl = UPDATE_URL % {"host": 
host, "params": params} # send the request to the server try: req = urllib2.Request(updateUrl) logger.log(u"NMJ: Sending NMJ scan update command via url: %s" % (updateUrl), logger.DEBUG) response = sickbeard.helpers.getURL(req) except IOError, e: if hasattr(e, 'reason'): logger.log(u"NMJ: Could not contact Popcorn Hour on host %s: %s" % (host, e.reason), logger.WARNING) elif hasattr(e, 'code'): logger.log(u"NMJ: Problem with Popcorn Hour on host %s: %s" % (host, e.code), logger.WARNING) return False except Exception, e: logger.log(u"NMJ: Unknown exception: " + ex(e), logger.ERROR) return False # try to parse the resulting XML try: et = etree.fromstring(response) result = et.findtext("returnValue") except SyntaxError, e: logger.log(u"NMJ: Unable to parse XML returned from the Popcorn Hour: %s" % (e), logger.ERROR) return False # if the result was a number then consider that an error if int(result) > 0: logger.log(u"NMJ: Popcorn Hour returned an errorcode: %s" % (result), logger.ERROR) return False else: logger.log(u"NMJ: Started background scan.", logger.MESSAGE) return True def _notifyNMJ(self, host=None, database=None, mount=None, force=False): """ Sends a NMJ update command based on the SB config settings host: The host to send the command to (optional, defaults to the host in the config) database: The database to use (optional, defaults to the database in the config) mount: The mount URL (optional, defaults to the mount URL in the config) force: If True then the notification will be sent even if NMJ is disabled in the config """ # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_NMJ and not force: return False # fill in omitted parameters if not host: host = sickbeard.NMJ_HOST if not database: database = sickbeard.NMJ_DATABASE if not mount: mount = sickbeard.NMJ_MOUNT logger.log(u"NMJ: Sending scan command.", logger.DEBUG) return self._sendNMJ(host, database, mount) 
############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): pass def notify_download(self, ep_name): pass def test_notify(self, host, database, mount): return self._notifyNMJ(host, database, mount, force=True) def update_library(self, ep_obj=None): if sickbeard.USE_NMJ: self._notifyNMJ() notifier = NMJNotifier
7,997
Python
.py
166
37.656627
133
0.600515
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,342
synoindex.py
midgetspy_Sick-Beard/sickbeard/notifiers/synoindex.py
# Author: Sebastien Erard <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import os import subprocess import sickbeard from sickbeard import logger from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex class synoIndexNotifier: def moveFolder(self, old_path, new_path): self.moveObject(old_path, new_path) def moveFile(self, old_file, new_file): self.moveObject(old_file, new_file) def moveObject(self, old_path, new_path): if sickbeard.USE_SYNOINDEX: synoindex_cmd = ['/usr/syno/bin/synoindex', '-N', ek.ek(os.path.abspath, new_path), ek.ek(os.path.abspath, old_path)] logger.log(u"SYNOINDEX: Executing command " + str(synoindex_cmd), logger.DEBUG) logger.log(u"SYNOINDEX: Absolute path to command: " + ek.ek(os.path.abspath, synoindex_cmd[0]), logger.DEBUG) try: p = subprocess.Popen(synoindex_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR) out, err = p.communicate() # @UnusedVariable logger.log(u"SYNOINDEX: Script result: " + str(out), logger.DEBUG) except OSError, e: logger.log(u"SYNOINDEX: Unable to run synoindex: " + ex(e), logger.WARNING) def deleteFolder(self, cur_path): self.makeObject('-D', cur_path) def addFolder(self, cur_path): self.makeObject('-A', cur_path) def 
deleteFile(self, cur_file): self.makeObject('-d', cur_file) def addFile(self, cur_file): self.makeObject('-a', cur_file) def makeObject(self, cmd_arg, cur_path): if sickbeard.USE_SYNOINDEX: synoindex_cmd = ['/usr/syno/bin/synoindex', cmd_arg, ek.ek(os.path.abspath, cur_path)] logger.log(u"SYNOINDEX: Executing command " + str(synoindex_cmd), logger.DEBUG) logger.log(u"SYNOINDEX: Absolute path to command: " + ek.ek(os.path.abspath, synoindex_cmd[0]), logger.DEBUG) try: p = subprocess.Popen(synoindex_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR) out, err = p.communicate() # @UnusedVariable logger.log(u"SYNOINDEX: Script result: " + str(out), logger.DEBUG) except OSError, e: logger.log(u"SYNOINDEX: Unable to run synoindex: " + ex(e), logger.WARNING) def _notify(self, message, title, force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_SYNOINDEX and not force: return False synodsmnotify_cmd = ['/usr/syno/bin/synodsmnotify', '@administrators', title, message] logger.log(u"SYNOINDEX: Executing command " + str(synodsmnotify_cmd), logger.DEBUG) try: p = subprocess.Popen(synodsmnotify_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR) output, err = p.communicate() # @UnusedVariable exit_status = p.returncode logger.log(u"SYNOINDEX: Script result: " + str(output), logger.DEBUG) if exit_status == 0: return True else: return False except OSError, e: logger.log(u"SYNOINDEX: Unable to run synodsmnotify: " + ex(e), logger.WARNING) return False ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.SYNOINDEX_NOTIFY_ONSNATCH: self._notify(notifyStrings[NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.SYNOINDEX_NOTIFY_ONDOWNLOAD: 
self._notify(notifyStrings[NOTIFY_DOWNLOAD], ep_name) def test_notify(self): return self._notify("This is a test notification from Sick Beard", "Test", force=True) def update_library(self, ep_obj=None): if sickbeard.USE_SYNOINDEX: self.addFile(ep_obj.location) notifier = synoIndexNotifier
4,942
Python
.py
93
44.83871
129
0.64773
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,343
nma.py
midgetspy_Sick-Beard/sickbeard/notifiers/nma.py
# Author: Adam Landry
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import sickbeard

from sickbeard import logger, common
from lib.pynma import pynma


class NMA_Notifier:
    """Pushes notifications through the NotifyMyAndroid (NMA) service."""

    def _notify(self, event, message, nma_api=None, nma_priority=None, force=False):
        """Send one notice to NMA.

        event/message: the notice title and body
        nma_api: comma-separated API key(s); defaults to the configured key(s)
        nma_priority: defaults to the configured priority
        force: send even when the notifier is disabled (used by test_notify)
        Returns True when NMA reports success, False otherwise.
        """
        # suppress notifications if the notifier is disabled but the notify options are checked
        if not sickbeard.USE_NMA and not force:
            return False

        # fall back to the configured credentials/priority when omitted
        nma_api = nma_api or sickbeard.NMA_API
        nma_priority = nma_priority or sickbeard.NMA_PRIORITY

        sender = pynma.PyNMA()
        api_keys = nma_api.split(',')
        sender.addkey(api_keys)

        # multiple API keys means the push has to go out in batch mode
        batch = len(api_keys) > 1

        logger.log(u"NMA: Sending notice with details: event=\"%s\", message=\"%s\", priority=%s, batch=%s" % (event, message, nma_priority, batch), logger.DEBUG)
        response = sender.push("Sick Beard", event, message, priority=nma_priority, batch_mode=batch)

        if response[nma_api][u'code'] == u'200':
            logger.log(u"NMA: Notification sent to NotifyMyAndroid", logger.MESSAGE)
            return True

        logger.log(u"NMA: Could not send notification to NotifyMyAndroid", logger.ERROR)
        return False

    ##############################################################################
    # Public functions
    ##############################################################################

    def notify_snatch(self, ep_name):
        if sickbeard.NMA_NOTIFY_ONSNATCH:
            self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)

    def notify_download(self, ep_name):
        if sickbeard.NMA_NOTIFY_ONDOWNLOAD:
            self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)

    def test_notify(self, nma_api, nma_priority):
        return self._notify("Test", "This is a test notification from Sick Beard", nma_api, nma_priority, force=True)

    def update_library(self, ep_obj=None):
        pass

notifier = NMA_Notifier
2,820
Python
.py
58
41.103448
163
0.634263
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,344
trakt.py
midgetspy_Sick-Beard/sickbeard/notifiers/trakt.py
# Author: Dieter Blomme <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import urllib2

from hashlib import sha1

try:
    import json
except ImportError:
    from lib import simplejson as json

import sickbeard

from sickbeard import logger


class TraktNotifier:
    """Reports library changes to the trakt.tv API."""

    def _notifyTrakt(self, method, api, username, password, data=None, force=False):
        """
        A generic method for communicating with trakt. Uses the method and data provided along
        with the auth info to send the command.

        method: The URL to use at trakt, relative, no leading slash.
        api: The API string to provide to trakt
        username: The username to use when logging in
        password: The unencrypted password to use when logging in
        data: Optional dict of URL parameters; left untouched by this call.

        Returns: A boolean representing success
        """
        # suppress notifications if the notifier is disabled but the notify options are checked
        if not sickbeard.USE_TRAKT and not force:
            return False

        logger.log(u"TRAKT: Calling method " + method, logger.DEBUG)

        # if the API isn't given then use the config API
        if not api:
            api = sickbeard.TRAKT_API

        # if the username isn't given then use the config username
        if not username:
            username = sickbeard.TRAKT_USERNAME

        # if the password isn't given then use the config password
        if not password:
            password = sickbeard.TRAKT_PASSWORD
        password = sha1(password).hexdigest()

        # append apikey to method
        method += api

        # BUGFIX: the old signature used a mutable default (data={}) and then
        # wrote the credentials into it, so the shared default dict kept the
        # username/password between calls.  Use a None sentinel and copy the
        # caller's dict so it is never mutated either.
        if data is None:
            data = {}
        else:
            data = dict(data)
        data["username"] = username
        data["password"] = password

        # take the URL params and make a json object out of them
        encoded_data = json.dumps(data)

        # request the URL from trakt and parse the result as json
        logger.log(u"TRAKT: Calling method http://api.trakt.tv/" + method + ", with data" + encoded_data, logger.DEBUG)
        req = urllib2.Request("http://api.trakt.tv/" + method, encoded_data)
        urlResp = sickbeard.helpers.getURL(req)
        if urlResp:
            resp = json.loads(urlResp)
        else:
            return False

        if ("error" in resp):
            raise Exception(resp["error"])

        if (resp["status"] == "success"):
            logger.log(u"TRAKT: Succeeded calling method. Result: " + resp["message"], logger.MESSAGE)
            return True

        logger.log(u"TRAKT: Failed calling method", logger.ERROR)
        return False

    ##############################################################################
    # Public functions
    ##############################################################################

    def notify_snatch(self, ep_name):
        pass

    def notify_download(self, ep_name):
        pass

    def test_notify(self, api, username, password):
        """
        Sends a test notification to trakt with the given authentication info and returns
        a boolean representing success.

        api: The api string to use
        username: The username to use
        password: The password to use

        Returns: True if the request succeeded, False otherwise
        """
        method = "account/test/"
        return self._notifyTrakt(method, api, username, password, {}, force=True)

    def update_library(self, ep_obj=None):
        """
        Sends a request to trakt indicating that the given episode is part of our library.

        ep_obj: The TVEpisode object to add to trakt
        """
        if sickbeard.USE_TRAKT:
            method = "show/episode/library/"

            # URL parameters
            data = {
                'tvdb_id': ep_obj.show.tvdbid,
                'title': ep_obj.show.name,
                'year': ep_obj.show.startyear,
                'episodes': [{
                    'season': ep_obj.season,
                    'episode': ep_obj.episode
                }]
            }

            if data:
                self._notifyTrakt(method, None, None, None, data)

notifier = TraktNotifier
4,760
Python
.py
109
34.834862
119
0.614335
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,345
pytivo.py
midgetspy_Sick-Beard/sickbeard/notifiers/pytivo.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import os

import sickbeard

from urllib import urlencode
from urllib2 import Request

from sickbeard import logger
from sickbeard import encodingKludge as ek


class pyTivoNotifier:
    """Pushes downloaded episode files to a TiVo through a pyTivo server.

    This notifier sends no snatch/download notices; its only real work is
    update_library, which asks pyTivo to transfer the new file to the TiVo.
    """

    ##############################################################################
    # Public functions
    ##############################################################################

    def notify_snatch(self, ep_name):
        # pyTivo has no user-facing notification concept; intentionally a no-op.
        pass

    def notify_download(self, ep_name):
        # Intentionally a no-op (see notify_snatch).
        pass

    def update_library(self, ep_obj=None):
        """Request that pyTivo push the episode's file to the configured TiVo.

        Returns True when the HTTP request to pyTivo succeeds, False when
        pyTivo is disabled or the request fails.
        """

        if not sickbeard.USE_PYTIVO:
            return False

        host = sickbeard.PYTIVO_HOST
        shareName = sickbeard.PYTIVO_SHARE_NAME
        tsn = sickbeard.PYTIVO_TIVO_NAME

        # There are two more values required, the container and file.
        #
        # container: The share name, show name and season
        #
        # file: The file name
        #
        # Some slicing and dicing of variables is required to get at these values.

        # Calculated values
        showPath = ep_obj.show.location
        showName = ep_obj.show.name
        rootShowAndSeason = ek.ek(os.path.dirname, ep_obj.location)
        absPath = ep_obj.location

        # Some show names have colons in them which are illegal in a path location, so strip them out.
        # (Are there other characters?)
        showName = showName.replace(":", "")

        # NOTE(review): the container/file derivation below relies on plain
        # substring replacement: stripping the show name off the show path to
        # get the library root, then stripping that root off the season dir
        # and the absolute file path.  This presumably assumes the standard
        # "<root>/<show name>/<season>/<file>" layout — if the show name or
        # root string occurs elsewhere in the path the replace would mangle
        # it.  TODO confirm against actual library layouts before changing.
        root = showPath.replace(showName, "")
        showAndSeason = rootShowAndSeason.replace(root, "")

        container = shareName + "/" + showAndSeason
        mediaFile = "/" + absPath.replace(root, "")

        # Finally create the url and make request
        requestUrl = "http://" + host + "/TiVoConnect?" + urlencode({'Command': 'Push', 'Container': container, 'File': mediaFile, 'tsn': tsn})

        logger.log(u"PYTIVO: Requesting " + requestUrl, logger.DEBUG)

        request = Request(requestUrl)

        # getURLFileLike returns None on failure, a file-like object on success
        if sickbeard.helpers.getURLFileLike(request) is None:
            logger.log(u"PYTIVO: Could not successfully request transfer of file", logger.ERROR)
            return False
        else:
            logger.log(u"PYTIVO: Successfully requested transfer of file", logger.MESSAGE)
            return True

notifier = pyTivoNotifier
3,062
Python
.py
67
38.328358
145
0.637225
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,346
boxcar2.py
midgetspy_Sick-Beard/sickbeard/notifiers/boxcar2.py
# Author: Marvin Pinto <[email protected]>
# Author: Dennis Lutter <[email protected]>
# Author: Shawn Conroyd <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import urllib
import urllib2

import sickbeard

from sickbeard import logger
from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD
from sickbeard.exceptions import ex

# Boxcar v2 notification endpoint
API_URL = "https://new.boxcar.io/api/notifications"


class Boxcar2Notifier:
    """Sends push notifications through the Boxcar v2 service."""

    def _sendBoxcar2(self, title, msg, accessToken, sound):
        """
        Sends a boxcar2 notification to the address provided

        msg: The message to send (unicode)
        title: The title of the message
        accessToken: The access token to send notification to

        returns: True if the message succeeded, False otherwise
        """

        # build up the URL and parameters
        msg = msg.strip().encode('utf-8')

        # NOTE: the short title field carries "title - msg" while the long
        # message carries msg alone, as sent to Boxcar's form-encoded API.
        data = urllib.urlencode({
            'user_credentials': accessToken,
            'notification[title]': title + " - " + msg,
            'notification[long_message]': msg,
            'notification[sound]': sound,
            'notification[source_name]': "SickBeard"
        })

        # send the request to boxcar2
        try:
            req = urllib2.Request(API_URL, data)
            handle = sickbeard.helpers.getURLFileLike(req, throw_exc=True)
            handle.close()

        except urllib2.URLError, e:
            # Boxcar replies 201 Created on success; older urllib2 treats any
            # non-2xx-range... actually reports 201 as an error, so a 201
            # landing here still means the notification went out.
            # FIXME: Python 2.5 hack, it wrongly reports 201 as an error
            if hasattr(e, 'code') and e.code == 201:
                logger.log(u"BOXCAR2: Notification successful.", logger.MESSAGE)
                return True

            # if we get an error back that doesn't have an error code then who knows what's really happening
            if not hasattr(e, 'code'):
                logger.log(u"BOXCAR2: Notification failed." + ex(e), logger.ERROR)
            else:
                logger.log(u"BOXCAR2: Notification failed. Error code: " + str(e.code), logger.ERROR)

                # map the known Boxcar error codes to friendlier log lines
                if e.code == 404:
                    logger.log(u"BOXCAR2: Access token is wrong/not associated to a device.", logger.ERROR)
                elif e.code == 401:
                    logger.log(u"BOXCAR2: Access token not recognized.", logger.ERROR)
                elif e.code == 400:
                    logger.log(u"BOXCAR2: Wrong data sent to boxcar.", logger.ERROR)
                elif e.code == 503:
                    logger.log(u"BOXCAR2: Boxcar server to busy to handle the request at this time.", logger.WARNING)
            return False

        # no exception at all: request completed cleanly
        logger.log(u"BOXCAR2: Notification successful.", logger.MESSAGE)
        return True

    def _notify(self, title, message, accessToken=None, sound=None, force=False):
        """
        Sends a boxcar2 notification based on the provided info or SB config

        title: The title of the notification to send
        message: The message string to send
        accessToken: The access token to send the notification to (optional, defaults to the access token in the config)
        force: If True then the notification will be sent even if Boxcar is disabled in the config
        """

        # suppress notifications if the notifier is disabled but the notify options are checked
        if not sickbeard.USE_BOXCAR2 and not force:
            return False

        # fill in omitted parameters
        if not accessToken:
            accessToken = sickbeard.BOXCAR2_ACCESS_TOKEN
        if not sound:
            sound = sickbeard.BOXCAR2_SOUND

        logger.log(u"BOXCAR2: Sending notification for " + message, logger.DEBUG)

        return self._sendBoxcar2(title, message, accessToken, sound)

    ##############################################################################
    # Public functions
    ##############################################################################

    def notify_snatch(self, ep_name):
        if sickbeard.BOXCAR2_NOTIFY_ONSNATCH:
            self._notify(notifyStrings[NOTIFY_SNATCH], ep_name)

    def notify_download(self, ep_name):
        if sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD:
            self._notify(notifyStrings[NOTIFY_DOWNLOAD], ep_name)

    def test_notify(self, accessToken, sound):
        return self._notify("Test", "This is a test notification from Sick Beard", accessToken, sound, force=True)

    def update_library(self, ep_obj=None):
        # Boxcar cannot index media; nothing to do.
        pass

notifier = Boxcar2Notifier
5,043
Python
.py
102
41.27451
120
0.647945
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,347
prowl.py
midgetspy_Sick-Beard/sickbeard/notifiers/prowl.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from httplib import HTTPSConnection from urllib import urlencode import sickbeard from sickbeard.exceptions import ex from sickbeard import common from sickbeard import logger class ProwlNotifier: def _notify(self, event, message, prowl_api=None, prowl_priority=None, force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_PROWL and not force: return False # fill in omitted parameters if not prowl_api: prowl_api = sickbeard.PROWL_API if not prowl_priority: prowl_priority = sickbeard.PROWL_PRIORITY logger.log(u"PROWL: Sending notice with details: event=\"%s\", message=\"%s\", priority=%s, api=%s" % (event, message, prowl_priority, prowl_api), logger.DEBUG) try: http_handler = HTTPSConnection("api.prowlapp.com") data = {'apikey': prowl_api, 'application': "SickBeard", 'event': event, 'description': message.encode('utf-8'), 'priority': prowl_priority } http_handler.request("POST", "/publicapi/add", headers={'Content-type': "application/x-www-form-urlencoded"}, body=urlencode(data) ) response = http_handler.getresponse() request_status = response.status except Exception, e: logger.log(u"PROWL: Notification failed: " + ex(e), logger.ERROR) return False if request_status 
== 200: logger.log(u"PROWL: Notifications sent.", logger.MESSAGE) return True elif request_status == 401: logger.log(u"PROWL: Auth failed: %s" % response.reason, logger.ERROR) return False elif request_status == 406: logger.log(u"PROWL: Message throttle limit reached.", logger.WARNING) return False else: logger.log(u"PROWL: Notification failed.", logger.ERROR) return False ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.PROWL_NOTIFY_ONSNATCH: self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.PROWL_NOTIFY_ONDOWNLOAD: self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name) def test_notify(self, prowl_api, prowl_priority): return self._notify("Test", "This is a test notification from Sick Beard", prowl_api, prowl_priority, force=True) def update_library(self, ep_obj=None): pass notifier = ProwlNotifier
3,705
Python
.py
78
38.269231
168
0.616366
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,348
nmjv2.py
midgetspy_Sick-Beard/sickbeard/notifiers/nmjv2.py
# Author: Jasper Lanting
# Based on nmj.py by Nico Berlee: http://nico.berlee.nl/
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

import urllib2
from xml.dom.minidom import parseString
import sickbeard
import time

from sickbeard import logger

try:
    import xml.etree.cElementTree as etree
except ImportError:
    import xml.etree.ElementTree as etree


class NMJv2Notifier:
    """Triggers library rescans on a Popcorn Hour (NMJ v2) media jukebox.

    Talks to the device's HTTP API on port 8008.  The database location is
    discovered once via notify_settings and stored in the SB config.
    """

    def notify_settings(self, host, dbloc, instance):
        """
        Retrieves the NMJv2 database location from Popcorn Hour

        host: The hostname/IP of the Popcorn Hour server
        dbloc: 'local' for PCH internal harddrive. 'network' for PCH network shares
        instance: Allows for selection of different DB in case of multiple databases

        Returns: True if the settings were retrieved successfully, False otherwise
        """
        try:
            url_loc = "http://" + host + ":8008/file_operation?arg0=list_user_storage_file&arg1=&arg2=" + instance + "&arg3=20&arg4=true&arg5=true&arg6=true&arg7=all&arg8=name_asc&arg9=false&arg10=false"
            req = urllib2.Request(url_loc)
            response1 = sickbeard.helpers.getURL(req)
            # TODO: convert to etree?
            xml = parseString(response1)
            # brief pause between requests — presumably to avoid hammering
            # the device's HTTP server; TODO confirm it is actually needed
            time.sleep(0.5)
            # probe every storage path the device reports until one contains
            # a valid database of the requested kind (local vs network)
            for node in xml.getElementsByTagName('path'):
                xmlTag = node.toxml()
                # strip the element markup and the device's '[=]' separator
                xmlData = xmlTag.replace('<path>', '').replace('</path>', '').replace('[=]', '')
                url_db = "http://" + host + ":8008/metadata_database?arg0=check_database&arg1=" + xmlData
                reqdb = urllib2.Request(url_db)
                responsedb = sickbeard.helpers.getURL(reqdb)
                xmldb = parseString(responsedb)
                returnvalue = xmldb.getElementsByTagName('returnValue')[0].toxml().replace('<returnValue>', '').replace('</returnValue>', '')
                if returnvalue == "0":
                    # valid database: decide local vs network by its path form
                    DB_path = xmldb.getElementsByTagName('database_path')[0].toxml().replace('<database_path>', '').replace('</database_path>', '').replace('[=]', '')
                    if dbloc == "local" and DB_path.find("localhost") > -1:
                        sickbeard.NMJv2_HOST = host
                        sickbeard.NMJv2_DATABASE = DB_path
                        return True
                    if dbloc == "network" and DB_path.find("://") > -1:
                        sickbeard.NMJv2_HOST = host
                        sickbeard.NMJv2_DATABASE = DB_path
                        return True
        except IOError, e:
            logger.log(u"NMJv2: Could not contact Popcorn Hour on host %s: %s" % (host, e), logger.WARNING)
            return False
        return False

    def _sendNMJ(self, host):
        """
        Sends a NMJ update command to the specified machine

        host: The hostname/IP to send the request to (no port)
        database: The database to send the request to
        mount: The mount URL to use (optional)

        Returns: True if the request succeeded, False otherwise
        """

        #if a host is provided then attempt to open a handle to that URL
        try:
            # two-step protocol: first update_scandir, then scanner_start;
            # the order of the two requests matters to the device
            url_scandir = "http://" + host + ":8008/metadata_database?arg0=update_scandir&arg1=" + sickbeard.NMJv2_DATABASE + "&arg2=&arg3=update_all"
            logger.log(u"NMJv2: Scan update command send to host: %s" % (host), logger.DEBUG)
            url_updatedb = "http://" + host + ":8008/metadata_database?arg0=scanner_start&arg1=" + sickbeard.NMJv2_DATABASE + "&arg2=background&arg3="
            logger.log(u"NMJv2: Try to mount network drive via url: %s" % (host), logger.DEBUG)
            prereq = urllib2.Request(url_scandir)
            req = urllib2.Request(url_updatedb)
            response1 = sickbeard.helpers.getURL(prereq)
            time.sleep(0.5)
            response2 = sickbeard.helpers.getURL(req)
        except IOError, e:
            logger.log(u"NMJv2: Could not contact Popcorn Hour on host %s: %s" % (host, e), logger.WARNING)
            return False

        # ParseError raised by etree subclasses SyntaxError, hence the catch
        try:
            et = etree.fromstring(response1)
            result1 = et.findtext("returnValue")
        except SyntaxError, e:
            logger.log(u"NMJv2: Unable to parse XML returned from the Popcorn Hour: update_scandir, %s" % (e), logger.ERROR)
            return False

        try:
            et = etree.fromstring(response2)
            result2 = et.findtext("returnValue")
        except SyntaxError, e:
            logger.log(u"NMJv2: Unable to parse XML returned from the Popcorn Hour: scanner_start, %s" % (e), logger.ERROR)
            return False

        # if the result was a number then consider that an error
        # (the two parallel lists map the device's numeric codes to messages)
        error_codes = ["8", "11", "22", "49", "50", "51", "60"]
        error_messages = ["Invalid parameter(s)/argument(s)",
                          "Invalid database path",
                          "Insufficient size",
                          "Database write error",
                          "Database read error",
                          "Open fifo pipe failed",
                          "Read only file system"]
        if int(result1) > 0:
            index = error_codes.index(result1)
            logger.log(u"NMJv2: Popcorn Hour returned an error: %s" % (error_messages[index]), logger.ERROR)
            return False
        else:
            if int(result2) > 0:
                index = error_codes.index(result2)
                logger.log(u"NMJv2: Popcorn Hour returned an error: %s" % (error_messages[index]), logger.ERROR)
                return False
            else:
                logger.log(u"NMJv2: Started background scan.", logger.MESSAGE)
                return True

    def _notifyNMJ(self, host=None, force=False):
        """
        Sends a NMJ update command based on the SB config settings

        host: The host to send the command to (optional, defaults to the host in the config)
        database: The database to use (optional, defaults to the database in the config)
        mount: The mount URL (optional, defaults to the mount URL in the config)
        force: If True then the notification will be sent even if NMJ is disabled in the config
        """
        # suppress notifications if the notifier is disabled but the notify options are checked
        if not sickbeard.USE_NMJv2 and not force:
            return False

        # fill in omitted parameters
        if not host:
            host = sickbeard.NMJv2_HOST

        logger.log(u"NMJv2: Sending scan command.", logger.DEBUG)

        return self._sendNMJ(host)

    ##############################################################################
    # Public functions
    ##############################################################################

    def notify_snatch(self, ep_name):
        # the Popcorn Hour has no user-facing notices; intentionally a no-op
        pass

    def notify_download(self, ep_name):
        # intentionally a no-op (see notify_snatch)
        pass

    def test_notify(self, host):
        return self._notifyNMJ(host, force=True)

    def update_library(self, ep_obj=None):
        # kick off a background rescan after a new episode lands
        if sickbeard.USE_NMJv2:
            self._notifyNMJ()

notifier = NMJv2Notifier
7,849
Python
.py
149
40.959732
204
0.593145
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,349
__init__.py
midgetspy_Sick-Beard/sickbeard/notifiers/__init__.py
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.

# Package entry point for all notifiers: instantiates one object per
# notification backend and fans events out to all of them.

import sickbeard

import xbmc
import plex
import nmj
import nmjv2
import synoindex
import pytivo

import growl
import prowl
from . import libnotify
import pushover
import boxcar2
import nma
import pushalot
import pushbullet

import slack
import tweet
import trakt

from sickbeard.common import *
from sickbeard import logger
from sickbeard.exceptions import ex

# home theater/nas
xbmc_notifier = xbmc.XBMCNotifier()
plex_notifier = plex.PLEXNotifier()
nmj_notifier = nmj.NMJNotifier()
nmjv2_notifier = nmjv2.NMJv2Notifier()
synoindex_notifier = synoindex.synoIndexNotifier()
pytivo_notifier = pytivo.pyTivoNotifier()

# devices
growl_notifier = growl.GrowlNotifier()
prowl_notifier = prowl.ProwlNotifier()
libnotify_notifier = libnotify.LibnotifyNotifier()
pushover_notifier = pushover.PushoverNotifier()
boxcar2_notifier = boxcar2.Boxcar2Notifier()
nma_notifier = nma.NMA_Notifier()
pushalot_notifier = pushalot.PushalotNotifier()
pushbullet_notifier = pushbullet.PushbulletNotifier()

# social
slack_notifier = slack.SlackNotifier()
twitter_notifier = tweet.TwitterNotifier()
trakt_notifier = trakt.TraktNotifier()

# All notifiers, in dispatch order.
notifiers = [
    libnotify_notifier,  # Libnotify notifier goes first because it doesn't involve blocking on network activity.
    xbmc_notifier,
    plex_notifier,
    nmj_notifier,
    nmjv2_notifier,
    synoindex_notifier,
    pytivo_notifier,
    growl_notifier,
    prowl_notifier,
    pushover_notifier,
    boxcar2_notifier,
    nma_notifier,
    pushalot_notifier,
    pushbullet_notifier,
    slack_notifier,
    twitter_notifier,
    trakt_notifier,
]


def notify_download(ep_name):
    # Fan the "download finished" event out to every notifier; a failure in
    # one backend must not stop the others, so each error is only logged.
    for n in notifiers:
        try:
            n.notify_download(ep_name)
        except Exception, e:
            logger.log(n.__class__.__name__ + ": " + ex(e), logger.ERROR)


def notify_snatch(ep_name):
    # Fan the "episode snatched" event out to every notifier (errors logged,
    # never propagated).
    for n in notifiers:
        try:
            n.notify_snatch(ep_name)
        except Exception, e:
            logger.log(n.__class__.__name__ + ": " + ex(e), logger.ERROR)


def update_library(ep_obj):
    # Ask every backend to index the newly-downloaded episode (errors logged,
    # never propagated).
    for n in notifiers:
        try:
            n.update_library(ep_obj=ep_obj)
        except Exception, e:
            logger.log(n.__class__.__name__ + ": " + ex(e), logger.ERROR)
3,071
Python
.py
95
27.947368
114
0.719322
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,350
libnotify.py
midgetspy_Sick-Beard/sickbeard/notifiers/libnotify.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import os import cgi import sickbeard from sickbeard import logger, common def diagnose(): ''' Check the environment for reasons libnotify isn't working. Return a user-readable message indicating possible issues. ''' try: import pynotify # @UnusedImport except ImportError: return (u"<p>Error: pynotify isn't installed. On Ubuntu/Debian, install the " u"<a href=\"apt:python-notify\">python-notify</a> package.") if 'DISPLAY' not in os.environ and 'DBUS_SESSION_BUS_ADDRESS' not in os.environ: return (u"<p>Error: Environment variables DISPLAY and DBUS_SESSION_BUS_ADDRESS " u"aren't set. libnotify will only work when you run Sick Beard " u"from a desktop login.") try: import dbus except ImportError: pass else: try: bus = dbus.SessionBus() except dbus.DBusException, e: return (u"<p>Error: unable to connect to D-Bus session bus: <code>%s</code>." 
u"<p>Are you running Sick Beard in a desktop session?") % (cgi.escape(e),) try: bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications') except dbus.DBusException, e: return (u"<p>Error: there doesn't seem to be a notification daemon available: <code>%s</code> " u"<p>Try installing notification-daemon or notify-osd.") % (cgi.escape(e),) return u"<p>Error: Unable to send notification." class LibnotifyNotifier: def __init__(self): self.pynotify = None self.gobject = None def init_pynotify(self): if self.pynotify is not None: return True try: import pynotify except ImportError: logger.log(u"LIBNOTIFY: Unable to import pynotify. libnotify notifications won't work.", logger.ERROR) return False try: import gobject except ImportError: logger.log(u"LIBNOTIFY: Unable to import gobject. We can't catch a GError in display.", logger.ERROR) return False if not pynotify.init('Sick Beard'): logger.log(u"LIBNOTIFY: Initialization of pynotify failed. libnotify notifications won't work.", logger.ERROR) return False self.pynotify = pynotify self.gobject = gobject return True def _notify(self, title, message, force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_LIBNOTIFY and not force: return False # detect if we can use pynotify if not self.init_pynotify(): return False # Can't make this a global constant because PROG_DIR isn't available # when the module is imported. icon_path = os.path.join(sickbeard.PROG_DIR, "data/images/sickbeard_touch_icon.png") icon_uri = "file://" + os.path.abspath(icon_path) # If the session bus can't be acquired here a bunch of warning messages # will be printed but the call to show() will still return True. # pynotify doesn't seem too keen on error handling. 
n = self.pynotify.Notification(title, message, icon_uri) try: return n.show() except self.gobject.GError: return False ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH: self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD: self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name) def test_notify(self): return self._notify("Test", "This is a test notification from Sick Beard", force=True) def update_library(self, ep_obj=None): pass notifier = LibnotifyNotifier
4,896
Python
.py
108
37.518519
122
0.640956
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,351
plex.py
midgetspy_Sick-Beard/sickbeard/notifiers/plex.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import urllib2 import sickbeard from sickbeard import logger from sickbeard import common from sickbeard.exceptions import ex from sickbeard.encodingKludge import fixStupidEncodings try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree class PLEXNotifier: def _send_to_plex(self, command, host, username=None, password=None): """Handles communication to Plex hosts via HTTP API Args: command: Dictionary of field/data pairs, encoded via urllib and passed to the legacy xbmcCmds HTTP API host: Plex host:port username: Plex API username password: Plex API password Returns: Returns 'OK' for successful commands or False if there was an error """ # fill in omitted parameters if not username: username = sickbeard.PLEX_USERNAME if not password: password = sickbeard.PLEX_PASSWORD if not host: logger.log(u"PLEX: No host specified, check your settings", logger.ERROR) return False for key in command: if type(command[key]) == unicode: command[key] = command[key].encode('utf-8') enc_command = urllib.urlencode(command) logger.log(u"PLEX: Encoded API command: " + enc_command, logger.DEBUG) url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command) try: req = urllib2.Request(url) # if 
we have a password, use authentication if password: pw_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() pw_mgr.add_password(None, url, username, password) else: pw_mgr = None result = sickbeard.helpers.getURL(req, password_mgr=pw_mgr) logger.log(u"PLEX: HTTP response: " + result.replace('\n', ''), logger.DEBUG) # could return result response = re.compile('<html><li>(.+\w)</html>').findall(result) return 'OK' except (urllib2.URLError, IOError), e: logger.log(u"PLEX: Warning: Couldn't contact Plex at " + fixStupidEncodings(url) + " " + ex(e), logger.WARNING) return False def _notify(self, message, title="Sick Beard", host=None, username=None, password=None, force=False): """Internal wrapper for the notify_snatch and notify_download functions Args: message: Message body of the notice to send title: Title of the notice to send host: Plex Media Client(s) host:port username: Plex username password: Plex password force: Used for the Test method to override config safety checks Returns: Returns a list results in the format of host:ip:result The result will either be 'OK' or False, this is used to be parsed by the calling function. 
""" # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_PLEX and not force: return False # fill in omitted parameters if not host: host = sickbeard.PLEX_HOST if not username: username = sickbeard.PLEX_USERNAME if not password: password = sickbeard.PLEX_PASSWORD result = '' for curHost in [x.strip() for x in host.split(",")]: logger.log(u"PLEX: Sending notification to '" + curHost + "' - " + message, logger.MESSAGE) command = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + title.encode("utf-8") + ',' + message.encode("utf-8") + ')'} notifyResult = self._send_to_plex(command, curHost, username, password) if notifyResult: result += curHost + ':' + str(notifyResult) return result ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.PLEX_NOTIFY_ONSNATCH: self._notify(ep_name, common.notifyStrings[common.NOTIFY_SNATCH]) def notify_download(self, ep_name): if sickbeard.PLEX_NOTIFY_ONDOWNLOAD: self._notify(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD]) def test_notify(self, host, username, password): return self._notify("This is a test notification from Sick Beard", "Test", host, username, password, force=True) def update_library(self, ep_obj=None, host=None, username=None, password=None): """Handles updating the Plex Media Server host via HTTP API Plex Media Server currently only supports updating the whole video library and not a specific path. 
Returns: Returns True or False """ # fill in omitted parameters if not host: host = sickbeard.PLEX_SERVER_HOST if not username: username = sickbeard.PLEX_USERNAME if not password: password = sickbeard.PLEX_PASSWORD if sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY: if not sickbeard.PLEX_SERVER_HOST: logger.log(u"PLEX: No Plex Media Server host specified, check your settings", logger.DEBUG) return False logger.log(u"PLEX: Updating library for the Plex Media Server host: " + host, logger.MESSAGE) # if username and password were provided, fetch the auth token from plex.tv token_arg = "" if username and password: logger.log(u"PLEX: fetching credentials for Plex user: " + username, logger.DEBUG) url = "https://plex.tv/users/sign_in.xml" req = urllib2.Request(url, data="") pw_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() pw_mgr.add_password(None, url, username, password) req.add_header("X-Plex-Product", "Sick Beard Notifier") req.add_header("X-Plex-Client-Identifier", "5f48c063eaf379a565ff56c9bb2b401e") req.add_header("X-Plex-Version", "1.0") try: response = sickbeard.helpers.getURL(req, throw_exc=True) auth_tree = etree.fromstring(response) token = auth_tree.findall(".//authentication-token")[0].text token_arg = "?X-Plex-Token=" + token except urllib2.URLError as e: logger.log(u"PLEX: Error fetching credentials from from plex.tv for user %s: %s" % (username, ex(e)), logger.MESSAGE) except (ValueError, IndexError) as e: logger.log(u"PLEX: Error parsing plex.tv response: " + ex(e), logger.MESSAGE) url = "http://%s/library/sections%s" % (sickbeard.PLEX_SERVER_HOST, token_arg) try: xml_tree = etree.fromstring(sickbeard.helpers.getURL(url)) media_container = xml_tree.getroot() except IOError, e: logger.log(u"PLEX: Error while trying to contact Plex Media Server: " + ex(e), logger.ERROR) return False sections = media_container.findall('.//Directory') if not sections: logger.log(u"PLEX: Plex Media Server not running on: " + sickbeard.PLEX_SERVER_HOST, logger.MESSAGE) 
return False for section in sections: if section.attrib['type'] == "show": url = "http://%s/library/sections/%s/refresh%s" % (sickbeard.PLEX_SERVER_HOST, section.attrib['key'], token_arg) if sickbeard.helpers.getURLFileLike(url) is None: logger.log(u"PLEX: Error updating library section for Plex Media Server", logger.ERROR) return False return True notifier = PLEXNotifier
9,037
Python
.py
167
41.628743
141
0.605842
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,352
pushalot.py
midgetspy_Sick-Beard/sickbeard/notifiers/pushalot.py
# Author: Maciej Olesinski (https://github.com/molesinski/) # Based on prowl.py by Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from httplib import HTTPSConnection from urllib import urlencode import sickbeard from sickbeard.exceptions import ex from sickbeard import common from sickbeard import logger class PushalotNotifier: def _notify(self, title, message, authtoken=None, silent=None, important=None, force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_PUSHALOT and not force: return False # fill in omitted parameters if not authtoken: authtoken = sickbeard.PUSHALOT_AUTHORIZATIONTOKEN if not important: important = bool(sickbeard.PUSHALOT_IMPORTANT) if not silent: silent = bool(sickbeard.PUSHALOT_SILENT) logger.log(u"PUSHALOT: Sending notice with details: title=\"%s\", message=\"%s\", silent=%s, important=%s, authtoken=%s" % (title, message, silent, important, authtoken), logger.DEBUG) try: http_handler = HTTPSConnection("pushalot.com") data = {'AuthorizationToken': authtoken, 'Title': title.encode('utf-8'), 'Body': message.encode('utf-8'), 'IsImportant': important, 'IsSilent': silent, 'Source': 'SickBeard' } http_handler.request("POST", "/api/sendmessage", headers={'Content-type': 
"application/x-www-form-urlencoded"}, body=urlencode(data) ) response = http_handler.getresponse() request_status = response.status except Exception, e: logger.log(u"PUSHALOT: Notification failed: " + ex(e), logger.ERROR) return False if request_status == 200: logger.log(u"PUSHALOT: Notifications sent.", logger.MESSAGE) return True elif request_status == 400: logger.log(u"PUSHALOT: Auth failed: %s" % response.reason, logger.ERROR) return False elif request_status == 406: logger.log(u"PUSHALOT: Message throttle limit reached.", logger.WARNING) return False elif request_status == 410: logger.log(u"PUSHALOT: The AuthorizationToken is invalid.", logger.ERROR) return False elif request_status == 503: logger.log(u"PUSHALOT: Notification servers are currently overloaded with requests. Try again later.", logger.ERROR) return False else: logger.log(u"PUSHALOT: Notification failed.", logger.ERROR) return False ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.PUSHALOT_NOTIFY_ONSNATCH: self._notify(common.notifyStrings[common.NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD: self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name) def test_notify(self, authtoken, silent, important): return self._notify("Test", "This is a test notification from Sick Beard", authtoken, silent, important, force=True) def update_library(self, ep_obj=None): pass notifier = PushalotNotifier
4,296
Python
.py
87
40.183908
192
0.63124
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,353
pushbullet.py
midgetspy_Sick-Beard/sickbeard/notifiers/pushbullet.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import urllib2 import socket import sickbeard from sickbeard import logger from sickbeard.common import notifyStrings, NOTIFY_SNATCH, NOTIFY_DOWNLOAD from sickbeard.exceptions import ex PUSHAPI_ENDPOINT = "https://api.pushbullet.com/v2/pushes" DEVICEAPI_ENDPOINT = "https://api.pushbullet.com/v2/devices" class PushbulletNotifier: def get_devices(self, accessToken=None): # fill in omitted parameters if not accessToken: accessToken = sickbeard.PUSHBULLET_ACCESS_TOKEN # get devices from pushbullet req = urllib2.Request(DEVICEAPI_ENDPOINT) pw_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() pw_mgr.add_password(None, DEVICEAPI_ENDPOINT, accessToken, '') return sickbeard.helpers.getURL(req, password_mgr=pw_mgr) def _sendPushbullet(self, title, body, accessToken, device_iden): # build up the URL and parameters body = body.strip().encode('utf-8') data = urllib.urlencode({ 'type': 'note', 'title': title, 'body': body, 'device_iden': device_iden }) # send the request to pushbullet try: req = urllib2.Request(PUSHAPI_ENDPOINT, data) pw_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() pw_mgr.add_password(None, PUSHAPI_ENDPOINT, accessToken, '') handle = sickbeard.helpers.getURLFileLike(req, password_mgr=pw_mgr, throw_exc=True) 
handle.close() except socket.timeout: return False except urllib2.URLError, e: # FIXME: Python 2.5 hack, it wrongly reports 201 as an error if hasattr(e, 'code') and e.code == 201: logger.log(u"PUSHBULLET: Notification successful.", logger.MESSAGE) return True # if we get an error back that doesn't have an error code then who knows what's really happening if not hasattr(e, 'code'): logger.log(u"PUSHBULLET: Notification failed." + ex(e), logger.ERROR) else: logger.log(u"PUSHBULLET: Notification failed. Error code: " + str(e.code), logger.ERROR) if e.code == 404: logger.log(u"PUSHBULLET: Access token is wrong/not associated to a device.", logger.ERROR) elif e.code == 401: logger.log(u"PUSHBULLET: Unauthorized, not a valid access token.", logger.ERROR) elif e.code == 400: logger.log(u"PUSHBULLET: Bad request, missing required parameter.", logger.ERROR) elif e.code == 503: logger.log(u"PUSHBULLET: Pushbullet server to busy to handle the request at this time.", logger.WARNING) return False logger.log(u"PUSHBULLET: Notification successful.", logger.MESSAGE) return True def _notify(self, title, body, accessToken=None, device_iden=None, force=False): """ Sends a pushbullet notification based on the provided info or SB config title: The title of the notification to send body: The body string to send accessToken: The access token to grant access device_iden: The iden of a specific target, if none provided send to all devices force: If True then the notification will be sent even if Pushbullet is disabled in the config """ # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_PUSHBULLET and not force: return False # fill in omitted parameters if not accessToken: accessToken = sickbeard.PUSHBULLET_ACCESS_TOKEN if not device_iden: device_iden = sickbeard.PUSHBULLET_DEVICE_IDEN logger.log(u"PUSHBULLET: Sending notice with details: title=\"%s\", body=\"%s\", device_iden=\"%s\"" % (title, body, device_iden), logger.DEBUG) 
return self._sendPushbullet(title, body, accessToken, device_iden) ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.PUSHBULLET_NOTIFY_ONSNATCH: self._notify(notifyStrings[NOTIFY_SNATCH], ep_name) def notify_download(self, ep_name): if sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD: self._notify(notifyStrings[NOTIFY_DOWNLOAD], ep_name) def test_notify(self, accessToken, device_iden): return self._notify("Test", "This is a test notification from Sick Beard", accessToken, device_iden, force=True) def update_library(self, ep_obj=None): pass notifier = PushbulletNotifier
5,523
Python
.py
108
42.787037
152
0.654852
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,354
xbmc.py
midgetspy_Sick-Beard/sickbeard/notifiers/xbmc.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import urllib2 import socket import time import sickbeard from sickbeard import logger from sickbeard import common from sickbeard.exceptions import ex from sickbeard.encodingKludge import fixStupidEncodings try: import xml.etree.cElementTree as etree except ImportError: import xml.etree.ElementTree as etree try: import json except ImportError: from lib import simplejson as json class XBMCNotifier: sb_logo_url = "http://www.sickbeard.com/notify.png" def _get_xbmc_version(self, host, username, password): """Returns XBMC JSON-RPC API version (odd # = dev, even # = stable) Sends a request to the XBMC host using the JSON-RPC to determine if the legacy API or if the JSON-RPC API functions should be used. Fallback to testing legacy HTTPAPI before assuming it is just a badly configured host. 
Args: host: XBMC webserver host:port username: XBMC webserver username password: XBMC webserver password Returns: Returns API number or False List of possible known values: API | XBMC Version -----+--------------- 2 | v10 (Dharma) 3 | (pre Eden) 4 | v11 (Eden) 5 | (pre Frodo) 6 | v12 (Frodo) / v13 (Gotham) """ # since we need to maintain python 2.5 compatibility we can not pass a timeout delay to urllib2 directly (python 2.6+) # override socket timeout to reduce delay for this call alone socket.setdefaulttimeout(10) checkCommand = '{"jsonrpc":"2.0","method":"JSONRPC.Version","id":1}' result = self._send_to_xbmc_json(checkCommand, host, username, password) # revert back to default socket timeout socket.setdefaulttimeout(sickbeard.SOCKET_TIMEOUT) if result: return result["result"]["version"] else: # fallback to legacy HTTPAPI method testCommand = {'command': 'Help'} request = self._send_to_xbmc(testCommand, host, username, password) if request: # return a fake version number, so it uses the legacy method return 1 else: return False def _notify(self, message, title="Sick Beard", host=None, username=None, password=None, force=False): """Internal wrapper for the notify_snatch and notify_download functions Detects JSON-RPC version then branches the logic for either the JSON-RPC or legacy HTTP API methods. Args: message: Message body of the notice to send title: Title of the notice to send host: XBMC webserver host:port username: XBMC webserver username password: XBMC webserver password force: Used for the Test method to override config safety checks Returns: Returns a list results in the format of host:ip:result The result will either be 'OK' or False, this is used to be parsed by the calling function. 
""" # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_XBMC and not force: return False # fill in omitted parameters if not host: host = sickbeard.XBMC_HOST if not username: username = sickbeard.XBMC_USERNAME if not password: password = sickbeard.XBMC_PASSWORD result = '' for curHost in [x.strip() for x in host.split(",")]: logger.log(u"XBMC: Sending XBMC notification to '" + curHost + "' - " + message, logger.MESSAGE) xbmcapi = self._get_xbmc_version(curHost, username, password) if xbmcapi: if (xbmcapi <= 4): logger.log(u"XBMC: Detected XBMC version <= 11, using XBMC HTTP API", logger.DEBUG) command = {'command': 'ExecBuiltIn', 'parameter': 'Notification(' + title.encode("utf-8") + ',' + message.encode("utf-8") + ')'} notifyResult = self._send_to_xbmc(command, curHost, username, password) if notifyResult: result += curHost + ':' + str(notifyResult) else: logger.log(u"XBMC: Detected XBMC version >= 12, using XBMC JSON API", logger.DEBUG) command = '{"jsonrpc":"2.0","method":"GUI.ShowNotification","params":{"title":"%s","message":"%s", "image": "%s"},"id":1}' % (title.encode("utf-8"), message.encode("utf-8"), self.sb_logo_url) notifyResult = self._send_to_xbmc_json(command, curHost, username, password) if notifyResult: result += curHost + ':' + notifyResult["result"].decode(sickbeard.SYS_ENCODING) else: if sickbeard.XBMC_ALWAYS_ON or force: logger.log(u"XBMC: Failed to detect XBMC version for '" + curHost + "', check configuration and try again.", logger.ERROR) result += curHost + ':False' return result ############################################################################## # Legacy HTTP API (pre XBMC 12) methods ############################################################################## def _send_to_xbmc(self, command, host=None, username=None, password=None): """Handles communication to XBMC servers via HTTP API Args: command: Dictionary of field/data pairs, encoded via urllib and passed to the 
XBMC API via HTTP host: XBMC webserver host:port username: XBMC webserver username password: XBMC webserver password Returns: Returns response.result for successful commands or False if there was an error """ # fill in omitted parameters if not username: username = sickbeard.XBMC_USERNAME if not password: password = sickbeard.XBMC_PASSWORD if not host: logger.log(u"XBMC: No host specified, check your settings", logger.DEBUG) return False for key in command: if type(command[key]) == unicode: command[key] = command[key].encode('utf-8') enc_command = urllib.urlencode(command) logger.log(u"XBMC: Encoded API command: " + enc_command, logger.DEBUG) url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, enc_command) try: req = urllib2.Request(url) # if we have a password, use authentication if password: pw_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() pw_mgr.add_password(None, url, username, password) else: pw_mgr = None result = sickbeard.helpers.getURL(req, password_mgr=pw_mgr, throw_exc=True) logger.log(u"XBMC: HTTP response: " + result.replace('\n', ''), logger.DEBUG) return result except (urllib2.URLError, IOError), e: logger.log(u"XBMC: Could not contact XBMC HTTP at " + fixStupidEncodings(url) + " " + ex(e), logger.WARNING) except Exception, e: logger.log(u"XBMC: Exception occurred while trying to access " + fixStupidEncodings(url) + " " + ex(e), logger.WARNING) return False def _update_library(self, host=None, showName=None): """Handles updating XBMC host via HTTP API Attempts to update the XBMC video library for a specific tv show if passed, otherwise update the whole library if enabled. 
Args: host: XBMC webserver host:port showName: Name of a TV show to specifically target the library update for Returns: Returns True or False """ if not host: logger.log(u"XBMC: No host specified, check your settings", logger.DEBUG) return False # if we're doing per-show if showName: logger.log(u"XBMC: Updating library via HTTP method for show " + showName, logger.MESSAGE) pathSql = 'select path.strPath from path, tvshow, tvshowlinkpath where ' \ 'tvshow.c00 = "%s" and tvshowlinkpath.idShow = tvshow.idShow ' \ 'and tvshowlinkpath.idPath = path.idPath' % (showName) # use this to get xml back for the path lookups xmlCommand = {'command': 'SetResponseFormat(webheader;false;webfooter;false;header;<xml>;footer;</xml>;opentag;<tag>;closetag;</tag>;closefinaltag;false)'} # sql used to grab path(s) sqlCommand = {'command': 'QueryVideoDatabase(%s)' % (pathSql)} # set output back to default resetCommand = {'command': 'SetResponseFormat()'} # set xml response format, if this fails then don't bother with the rest request = self._send_to_xbmc(xmlCommand, host) if not request: return False sqlXML = self._send_to_xbmc(sqlCommand, host) request = self._send_to_xbmc(resetCommand, host) if not sqlXML: logger.log(u"XBMC: Invalid response for " + showName + " on " + host, logger.DEBUG) return False encSqlXML = urllib.quote(sqlXML, ':\\/<>') try: et = etree.fromstring(encSqlXML) except SyntaxError, e: logger.log(u"XBMC: Unable to parse XML returned from XBMC: " + ex(e), logger.ERROR) return False paths = et.findall('.//field') if not paths: logger.log(u"XBMC: No valid paths found for " + showName + " on " + host, logger.DEBUG) return False for path in paths: # we do not need it double-encoded, gawd this is dumb unEncPath = urllib.unquote(path.text).decode(sickbeard.SYS_ENCODING) logger.log(u"XBMC: Updating " + showName + " on " + host + " at " + unEncPath, logger.MESSAGE) updateCommand = {'command': 'ExecBuiltIn', 'parameter': 'XBMC.updatelibrary(video, %s)' % (unEncPath)} request = 
self._send_to_xbmc(updateCommand, host) if not request: logger.log(u"XBMC: Update of show directory failed on " + showName + " on " + host + " at " + unEncPath, logger.WARNING) return False # sleep for a few seconds just to be sure xbmc has a chance to finish each directory if len(paths) > 1: time.sleep(5) # do a full update if requested else: logger.log(u"XBMC: Doing Full Library update via HTTP method for host: " + host, logger.MESSAGE) updateCommand = {'command': 'ExecBuiltIn', 'parameter': 'XBMC.updatelibrary(video)'} request = self._send_to_xbmc(updateCommand, host) if not request: logger.log(u"XBMC: Full Library update failed on: " + host, logger.ERROR) return False return True ############################################################################## # JSON-RPC API (XBMC 12+) methods ############################################################################## def _send_to_xbmc_json(self, command, host=None, username=None, password=None): """Handles communication to XBMC servers via JSONRPC Args: command: Dictionary of field/data pairs, encoded via urllib and passed to the XBMC JSON-RPC via HTTP host: XBMC webserver host:port username: XBMC webserver username password: XBMC webserver password Returns: Returns response.result for successful commands or False if there was an error """ # fill in omitted parameters if not username: username = sickbeard.XBMC_USERNAME if not password: password = sickbeard.XBMC_PASSWORD if not host: logger.log(u"XBMC: No host specified, check your settings", logger.DEBUG) return False command = command.encode('utf-8') logger.log(u"XBMC: JSON command: " + command, logger.DEBUG) url = 'http://%s/jsonrpc' % (host) try: req = urllib2.Request(url, command) req.add_header("Content-type", "application/json") # if we have a password, use authentication if password: pw_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() pw_mgr.add_password(None, url, username, password) else: pw_mgr = None response = sickbeard.helpers.getURL(req, 
password_mgr=pw_mgr, throw_exc=True) # parse the json result result = json.loads(response) logger.log(u"XBMC: JSON response: " + str(result), logger.DEBUG) return result # need to return response for parsing except ValueError, e: logger.log(u"XBMC: Unable to decode JSON: " + response, logger.WARNING) except urllib2.URLError, e: logger.log(u"XBMC: Error while trying to retrieve XBMC API version for " + host + ": " + ex(e), logger.WARNING) except IOError, e: logger.log(u"XBMC: Could not contact XBMC JSON API at " + fixStupidEncodings(url) + " " + ex(e), logger.WARNING) except Exception, e: logger.log(u"XBMC: Exception occurred while trying to access " + fixStupidEncodings(url) + " " + ex(e), logger.WARNING) return False def _update_library_json(self, host=None, showName=None): """Handles updating XBMC host via HTTP JSON-RPC Attempts to update the XBMC video library for a specific tv show if passed, otherwise update the whole library if enabled. Args: host: XBMC webserver host:port showName: Name of a TV show to specifically target the library update for Returns: Returns True or False """ if not host: logger.log(u"XBMC: No host specified, check your settings", logger.DEBUG) return False # if we're doing per-show if showName: tvshowid = -1 logger.log(u"XBMC: Updating library via JSON method for show " + showName, logger.MESSAGE) # get tvshowid by showName showsCommand = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShows","id":1}' showsResponse = self._send_to_xbmc_json(showsCommand, host) if showsResponse and "result" in showsResponse and "tvshows" in showsResponse["result"]: shows = showsResponse["result"]["tvshows"] else: logger.log(u"XBMC: No tvshows in XBMC TV show list", logger.DEBUG) return False for show in shows: if (show["label"] == showName): tvshowid = show["tvshowid"] break # exit out of loop otherwise the label and showname will not match up # this can be big, so free some memory del shows # we didn't find the show (exact match), thus revert to just doing 
a full update if enabled if (tvshowid == -1): logger.log(u"XBMC: Exact show name not matched in XBMC TV show list", logger.DEBUG) return False # lookup tv-show path pathCommand = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShowDetails","params":{"tvshowid":%d, "properties": ["file"]},"id":1}' % (tvshowid) pathResponse = self._send_to_xbmc_json(pathCommand, host) path = pathResponse["result"]["tvshowdetails"]["file"] logger.log(u"XBMC: Received Show: " + show["label"] + " with ID: " + str(tvshowid) + " Path: " + path, logger.DEBUG) if (len(path) < 1): logger.log(u"XBMC: No valid path found for " + showName + " with ID: " + str(tvshowid) + " on " + host, logger.WARNING) return False logger.log(u"XBMC: Updating " + showName + " on " + host + " at " + path, logger.MESSAGE) updateCommand = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","params":{"directory":%s},"id":1}' % (json.dumps(path)) request = self._send_to_xbmc_json(updateCommand, host) if not request: logger.log(u"XBMC: Update of show directory failed on " + showName + " on " + host + " at " + path, logger.WARNING) return False # catch if there was an error in the returned request for r in request: if 'error' in r: logger.log(u"XBMC: Error while attempting to update show directory for " + showName + " on " + host + " at " + path, logger.ERROR) return False # do a full update if requested else: logger.log(u"XBMC: Doing Full Library update via JSON method for host: " + host, logger.MESSAGE) updateCommand = '{"jsonrpc":"2.0","method":"VideoLibrary.Scan","id":1}' request = self._send_to_xbmc_json(updateCommand, host, sickbeard.XBMC_USERNAME, sickbeard.XBMC_PASSWORD) if not request: logger.log(u"XBMC: Full Library update failed on: " + host, logger.ERROR) return False return True ############################################################################## # Public functions which will call the JSON or Legacy HTTP API methods ############################################################################## def 
notify_snatch(self, ep_name): if sickbeard.XBMC_NOTIFY_ONSNATCH: self._notify(ep_name, common.notifyStrings[common.NOTIFY_SNATCH]) def notify_download(self, ep_name): if sickbeard.XBMC_NOTIFY_ONDOWNLOAD: self._notify(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD]) def test_notify(self, host, username, password): return self._notify("This is a test notification from Sick Beard", "Test", host, username, password, force=True) def update_library(self, ep_obj=None, show_obj=None): """Public wrapper for the update library functions to branch the logic for JSON-RPC or legacy HTTP API Checks the XBMC API version to branch the logic to call either the legacy HTTP API or the newer JSON-RPC over HTTP methods. Do the ability of accepting a list of hosts delimited by comma, we split off the first host to send the update to. This is a workaround for SQL backend users as updating multiple clients causes duplicate entries. Future plan is to revisit how we store the host/ip/username/pw/options so that it may be more flexible. 
Args: showName: Name of a TV show to specifically target the library update for Returns: Returns True or False """ if ep_obj: showName = ep_obj.show.name elif show_obj: showName = show_obj.name else: showName = None if sickbeard.USE_XBMC and sickbeard.XBMC_UPDATE_LIBRARY: if not sickbeard.XBMC_HOST: logger.log(u"XBMC: No host specified, check your settings", logger.DEBUG) return False if sickbeard.XBMC_UPDATE_ONLYFIRST: # only send update to first host in the list if requested -- workaround for xbmc sql backend users host = sickbeard.XBMC_HOST.split(",")[0].strip() else: host = sickbeard.XBMC_HOST result = 0 for curHost in [x.strip() for x in host.split(",")]: logger.log(u"XBMC: Sending request to update library for host: '" + curHost + "'", logger.MESSAGE) xbmcapi = self._get_xbmc_version(curHost, sickbeard.XBMC_USERNAME, sickbeard.XBMC_PASSWORD) if xbmcapi: if (xbmcapi <= 4): # try to update for just the show, if it fails, do full update if enabled if not self._update_library(curHost, showName): if showName and sickbeard.XBMC_UPDATE_FULL: self._update_library(curHost) else: # try to update for just the show, if it fails, do full update if enabled if not self._update_library_json(curHost, showName): if showName and sickbeard.XBMC_UPDATE_FULL: self._update_library_json(curHost) else: if sickbeard.XBMC_ALWAYS_ON: logger.log(u"XBMC: Failed to detect XBMC version for '" + curHost + "', check configuration and try again.", logger.ERROR) result = result + 1 # needed for the 'update xbmc' submenu command # as it only cares of the final result vs the individual ones if result == 0: return True else: return False notifier = XBMCNotifier
22,572
Python
.py
401
42.817955
212
0.581689
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,355
tweet.py
midgetspy_Sick-Beard/sickbeard/notifiers/tweet.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard from sickbeard import logger, common from sickbeard.exceptions import ex # parse_qsl moved to urlparse module in v2.6 try: from urlparse import parse_qsl # @UnusedImport except: from cgi import parse_qsl # @Reimport import lib.oauth2 as oauth import lib.pythontwitter as twitter class TwitterNotifier: consumer_key = "vHHtcB6WzpWDG6KYlBMr8g" consumer_secret = "zMqq5CB3f8cWKiRO2KzWPTlBanYmV0VYxSXZ0Pxds0E" REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token" ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token" AUTHORIZATION_URL = "https://api.twitter.com/oauth/authorize" SIGNIN_URL = "https://api.twitter.com/oauth/authenticate" def _get_authorization(self): signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() # @UnusedVariable oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret) oauth_client = oauth.Client(oauth_consumer) logger.log(u'TWITTER: Requesting temp token from Twitter', logger.DEBUG) resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET') if resp['status'] != '200': logger.log(u"TWITTER: Invalid respond from Twitter requesting temp token: %s" % resp['status'], logger.ERROR) else: request_token = dict(parse_qsl(content)) 
sickbeard.TWITTER_USERNAME = request_token['oauth_token'] sickbeard.TWITTER_PASSWORD = request_token['oauth_token_secret'] return self.AUTHORIZATION_URL + "?oauth_token=" + request_token['oauth_token'] def _get_credentials(self, key): request_token = {} request_token['oauth_token'] = sickbeard.TWITTER_USERNAME request_token['oauth_token_secret'] = sickbeard.TWITTER_PASSWORD request_token['oauth_callback_confirmed'] = 'true' token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret']) token.set_verifier(key) logger.log(u"TWITTER: Generating and signing request for an access token using key " + key, logger.DEBUG) signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() # @UnusedVariable oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret) logger.log(u"TWITTER: oauth_consumer: " + str(oauth_consumer), logger.DEBUG) oauth_client = oauth.Client(oauth_consumer, token) logger.log(u"TWITTER: oauth_client: " + str(oauth_client), logger.DEBUG) resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key) logger.log(u"TWITTER: resp, content: " + str(resp) + "," + str(content), logger.DEBUG) access_token = dict(parse_qsl(content)) logger.log(u"TWITTER: access_token: " + str(access_token), logger.DEBUG) logger.log(u"TWITTER: resp[status] = " + str(resp['status']), logger.DEBUG) if resp['status'] != '200': logger.log(u"TWITTER: The request for a token with did not succeed: " + str(resp['status']), logger.ERROR) return False else: logger.log(u"TWITTER: Your Twitter Access Token key: %s" % access_token['oauth_token'], logger.DEBUG) logger.log(u"TWITTER: Access Token secret: %s" % access_token['oauth_token_secret'], logger.DEBUG) sickbeard.TWITTER_USERNAME = access_token['oauth_token'] sickbeard.TWITTER_PASSWORD = access_token['oauth_token_secret'] return True def _send_tweet(self, message=None): username = self.consumer_key password = self.consumer_secret access_token_key = 
sickbeard.TWITTER_USERNAME access_token_secret = sickbeard.TWITTER_PASSWORD logger.log(u"TWITTER: Sending tweet: " + message, logger.DEBUG) api = twitter.Api(username, password, access_token_key, access_token_secret) try: api.PostUpdate(message) except Exception, e: logger.log(u"TWITTER: Error Sending Tweet: " + ex(e), logger.ERROR) return False return True def _notify(self, message='', force=False): # suppress notifications if the notifier is disabled but the notify options are checked if not sickbeard.USE_TWITTER and not force: return False return self._send_tweet(sickbeard.TWITTER_PREFIX + ": " + message) ############################################################################## # Public functions ############################################################################## def notify_snatch(self, ep_name): if sickbeard.TWITTER_NOTIFY_ONSNATCH: self._notify(common.notifyStrings[common.NOTIFY_SNATCH] + ': ' + ep_name) def notify_download(self, ep_name): if sickbeard.TWITTER_NOTIFY_ONDOWNLOAD: self._notify(common.notifyStrings[common.NOTIFY_DOWNLOAD] + ': ' + ep_name) def test_notify(self): return self._notify("This is a test notification from Sick Beard", force=True) def update_library(self, ep_obj): pass notifier = TwitterNotifier
5,901
Python
.py
106
48.518868
121
0.673554
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,356
xbmc_12plus.py
midgetspy_Sick-Beard/sickbeard/metadata/xbmc_12plus.py
# URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import generic import datetime from lib.tvdb_api import tvdb_api, tvdb_exceptions import sickbeard from sickbeard import logger, exceptions, helpers from sickbeard.exceptions import ex try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree class XBMC_12PlusMetadata(generic.GenericMetadata): """ Metadata generation class for XBMC 12+. 
The following file structure is used: show_root/tvshow.nfo (show metadata) show_root/fanart.jpg (fanart) show_root/poster.jpg (poster) show_root/banner.jpg (banner) show_root/Season ##/filename.ext (*) show_root/Season ##/filename.nfo (episode metadata) show_root/Season ##/filename-thumb.jpg (episode thumb) show_root/season##-poster.jpg (season posters) show_root/season##-banner.jpg (season banners) show_root/season-all-poster.jpg (season all poster) show_root/season-all-banner.jpg (season all banner) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): generic.GenericMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = 'XBMC 12+' self.poster_name = "poster.jpg" self.season_all_poster_name = "season-all-poster.jpg" # web-ui metadata template self.eg_show_metadata = "tvshow.nfo" self.eg_episode_metadata = "Season##\\<i>filename</i>.nfo" self.eg_fanart = "fanart.jpg" self.eg_poster = "poster.jpg" self.eg_banner = "banner.jpg" self.eg_episode_thumbnails = "Season##\\<i>filename</i>-thumb.jpg" self.eg_season_posters = "season##-poster.jpg" self.eg_season_banners = "season##-banner.jpg" self.eg_season_all_poster = "season-all-poster.jpg" self.eg_season_all_banner = "season-all-banner.jpg" def _show_data(self, show_obj): """ Creates an elementTree XML structure for an XBMC-style tvshow.nfo and returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ show_ID = show_obj.tvdbid tvdb_lang = show_obj.lang # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) tv_node = etree.Element("tvshow") try: myShow = t[int(show_ID)] except tvdb_exceptions.tvdb_shownotfound: logger.log(u"Unable to find show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR) raise except tvdb_exceptions.tvdb_error: logger.log(u"TVDB is down, can't use its data to add this show", logger.ERROR) raise # check for title and id try: if myShow["seriesname"] is None or myShow["seriesname"] == "" or myShow["id"] is None or myShow["id"] == "": logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR) return False except tvdb_exceptions.tvdb_attributenotfound: logger.log(u"Incomplete info for show with id " + str(show_ID) + " on tvdb, skipping it", logger.ERROR) return False title = etree.SubElement(tv_node, "title") if myShow["seriesname"] is not None: title.text = myShow["seriesname"] rating = etree.SubElement(tv_node, "rating") if myShow["rating"] is not None: rating.text = myShow["rating"] year = etree.SubElement(tv_node, "year") if myShow["firstaired"] is not None: try: year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year) if year_text: year.text = year_text except: pass plot = etree.SubElement(tv_node, "plot") if myShow["overview"] is not None: plot.text = myShow["overview"] episodeguide = etree.SubElement(tv_node, "episodeguide") episodeguideurl = etree.SubElement(episodeguide, "url") episodeguideurl2 = etree.SubElement(tv_node, "episodeguideurl") if myShow["id"] is not None: showurl = sickbeard.TVDB_BASE_URL + '/series/' + myShow["id"] + '/all/en.zip' episodeguideurl.text = showurl 
episodeguideurl2.text = showurl mpaa = etree.SubElement(tv_node, "mpaa") if myShow["contentrating"] is not None: mpaa.text = myShow["contentrating"] tvdbid = etree.SubElement(tv_node, "id") if myShow["id"] is not None: tvdbid.text = myShow["id"] genre = etree.SubElement(tv_node, "genre") if myShow["genre"] is not None: genre.text = " / ".join([x.strip() for x in myShow["genre"].split('|') if x and x.strip()]) premiered = etree.SubElement(tv_node, "premiered") if myShow["firstaired"] is not None: premiered.text = myShow["firstaired"] studio = etree.SubElement(tv_node, "studio") if myShow["network"] is not None: studio.text = myShow["network"] if myShow["_actors"] is not None: for actor in myShow["_actors"]: cur_actor_name_text = actor['name'] if cur_actor_name_text is not None and cur_actor_name_text.strip(): cur_actor = etree.SubElement(tv_node, "actor") cur_actor_name = etree.SubElement(cur_actor, "name") cur_actor_name.text = cur_actor_name_text.strip() cur_actor_role = etree.SubElement(cur_actor, "role") cur_actor_role_text = actor['role'] if cur_actor_role_text is not None: cur_actor_role.text = cur_actor_role_text cur_actor_thumb = etree.SubElement(cur_actor, "thumb") cur_actor_thumb_text = actor['image'] if cur_actor_thumb_text is not None: cur_actor_thumb.text = cur_actor_thumb_text # Make it purdy helpers.indentXML(tv_node) data = etree.ElementTree(tv_node) return data def _ep_data(self, ep_obj): """ Creates an elementTree XML structure for an XBMC-style episode.nfo and returns the resulting data object. 
show_obj: a TVEpisode instance to create the NFO for """ eps_to_write = [ep_obj] + ep_obj.relatedEps tvdb_lang = ep_obj.show.lang # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang try: t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) myShow = t[ep_obj.show.tvdbid] except tvdb_exceptions.tvdb_shownotfound, e: raise exceptions.ShowNotFoundException(e.message) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR) return if len(eps_to_write) > 1: rootNode = etree.Element("xbmcmultiepisode") else: rootNode = etree.Element("episodedetails") # write an NFO containing info for all matching episodes for curEpToWrite in eps_to_write: try: myEp = myShow[curEpToWrite.season][curEpToWrite.episode] except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound): logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? 
Should I delete from db?") return None if not myEp["firstaired"]: myEp["firstaired"] = str(datetime.date.fromordinal(1)) if not myEp["episodename"]: logger.log(u"Not generating nfo because the ep has no title", logger.DEBUG) return None logger.log(u"Creating metadata for episode " + str(ep_obj.season) + "x" + str(ep_obj.episode), logger.DEBUG) if len(eps_to_write) > 1: episode = etree.SubElement(rootNode, "episodedetails") else: episode = rootNode title = etree.SubElement(episode, "title") if curEpToWrite.name is not None: title.text = curEpToWrite.name showtitle = etree.SubElement(episode, "showtitle") if curEpToWrite.show.name is not None: showtitle.text = curEpToWrite.show.name season = etree.SubElement(episode, "season") season.text = str(curEpToWrite.season) episodenum = etree.SubElement(episode, "episode") episodenum.text = str(curEpToWrite.episode) uniqueid = etree.SubElement(episode, "uniqueid") uniqueid.text = str(curEpToWrite.tvdbid) aired = etree.SubElement(episode, "aired") if curEpToWrite.airdate != datetime.date.fromordinal(1): aired.text = str(curEpToWrite.airdate) else: aired.text = '' plot = etree.SubElement(episode, "plot") if curEpToWrite.description is not None: plot.text = curEpToWrite.description runtime = etree.SubElement(episode, "runtime") if curEpToWrite.season != 0: if myShow["runtime"] is not None: runtime.text = myShow["runtime"] displayseason = etree.SubElement(episode, "displayseason") if 'airsbefore_season' in myEp: displayseason_text = myEp['airsbefore_season'] if displayseason_text is not None: displayseason.text = displayseason_text displayepisode = etree.SubElement(episode, "displayepisode") if 'airsbefore_episode' in myEp: displayepisode_text = myEp['airsbefore_episode'] if displayepisode_text is not None: displayepisode.text = displayepisode_text thumb = etree.SubElement(episode, "thumb") thumb_text = myEp['filename'] if thumb_text is not None: thumb.text = thumb_text watched = etree.SubElement(episode, "watched") watched.text 
= 'false' credits = etree.SubElement(episode, "credits") credits_text = myEp['writer'] if credits_text is not None: credits.text = credits_text director = etree.SubElement(episode, "director") director_text = myEp['director'] if director_text is not None: director.text = director_text rating = etree.SubElement(episode, "rating") rating_text = myEp['rating'] if rating_text is not None: rating.text = rating_text gueststar_text = myEp['gueststars'] if gueststar_text is not None: for actor in (x.strip() for x in gueststar_text.split('|') if x and x.strip()): cur_actor = etree.SubElement(episode, "actor") cur_actor_name = etree.SubElement(cur_actor, "name") cur_actor_name.text = actor if myShow['_actors'] is not None: for actor in myShow['_actors']: cur_actor_name_text = actor['name'] if cur_actor_name_text is not None and cur_actor_name_text.strip(): cur_actor = etree.SubElement(episode, "actor") cur_actor_name = etree.SubElement(cur_actor, "name") cur_actor_name.text = cur_actor_name_text.strip() cur_actor_role = etree.SubElement(cur_actor, "role") cur_actor_role_text = actor['role'] if cur_actor_role_text is not None: cur_actor_role.text = cur_actor_role_text cur_actor_thumb = etree.SubElement(cur_actor, "thumb") cur_actor_thumb_text = actor['image'] if cur_actor_thumb_text is not None: cur_actor_thumb.text = cur_actor_thumb_text # Make it purdy helpers.indentXML(rootNode) data = etree.ElementTree(rootNode) return data # present a standard "interface" from the module metadata_class = XBMC_12PlusMetadata
14,618
Python
.py
288
37.618056
177
0.578807
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,357
mediabrowser.py
midgetspy_Sick-Beard/sickbeard/metadata/mediabrowser.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import os import re import sickbeard import generic from sickbeard import logger, exceptions, helpers from sickbeard import encodingKludge as ek from lib.tvdb_api import tvdb_api, tvdb_exceptions from sickbeard.exceptions import ex try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree class MediaBrowserMetadata(generic.GenericMetadata): """ Metadata generation class for Media Browser 2.x/3.x - Standard Mode. 
The following file structure is used: show_root/series.xml (show metadata) show_root/folder.jpg (poster) show_root/backdrop.jpg (fanart) show_root/Season ##/folder.jpg (season thumb) show_root/Season ##/filename.ext (*) show_root/Season ##/metadata/filename.xml (episode metadata) show_root/Season ##/metadata/filename.jpg (episode thumb) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): generic.GenericMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = "MediaBrowser" self._ep_nfo_extension = "xml" self._show_metadata_filename = "series.xml" self.fanart_name = "backdrop.jpg" self.poster_name = "folder.jpg" # web-ui metadata template self.eg_show_metadata = "series.xml" self.eg_episode_metadata = "Season##\\metadata\\<i>filename</i>.xml" self.eg_fanart = "backdrop.jpg" self.eg_poster = "folder.jpg" self.eg_banner = "banner.jpg" self.eg_episode_thumbnails = "Season##\\metadata\\<i>filename</i>.jpg" self.eg_season_posters = "Season##\\folder.jpg" self.eg_season_banners = "Season##\\banner.jpg" self.eg_season_all_poster = "<i>not supported</i>" self.eg_season_all_banner = "<i>not supported</i>" # Override with empty methods for unsupported features def retrieveShowMetadata(self, folder): # while show metadata is generated, it is not supported for our lookup return (None, None) def create_season_all_poster(self, show_obj): pass def create_season_all_banner(self, show_obj): pass def get_episode_file_path(self, ep_obj): """ Returns a full show dir/metadata/episode.xml path for MediaBrowser episode metadata files ep_obj: a TVEpisode object to get the path for """ if ek.ek(os.path.isfile, ep_obj.location): xml_file_name = helpers.replaceExtension(ek.ek(os.path.basename, 
ep_obj.location), self._ep_nfo_extension) metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata') xml_file_path = ek.ek(os.path.join, metadata_dir_name, xml_file_name) else: logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) return '' return xml_file_path def get_episode_thumb_path(self, ep_obj): """ Returns a full show dir/metadata/episode.jpg path for MediaBrowser episode thumbs. ep_obj: a TVEpisode object to get the path from """ if ek.ek(os.path.isfile, ep_obj.location): tbn_file_name = helpers.replaceExtension(ek.ek(os.path.basename, ep_obj.location), 'jpg') metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), 'metadata') tbn_file_path = ek.ek(os.path.join, metadata_dir_name, tbn_file_name) else: return None return tbn_file_path def get_season_poster_path(self, show_obj, season): """ Season thumbs for MediaBrowser go in Show Dir/Season X/folder.jpg If no season folder exists, None is returned """ dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))] season_dir_regex = '^Season\s+(\d+)$' season_dir = None for cur_dir in dir_list: # MediaBrowser 1.x only supports 'Specials' # MediaBrowser 2.x looks to only support 'Season 0' # MediaBrowser 3.x looks to mimic XBMC/Plex support if season == 0 and cur_dir == "Specials": season_dir = cur_dir break match = re.match(season_dir_regex, cur_dir, re.I) if not match: continue cur_season = int(match.group(1)) if cur_season == season: season_dir = cur_dir break if not season_dir: logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) return None logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg') def get_season_banner_path(self, show_obj, season): """ Season thumbs for MediaBrowser go in Show 
Dir/Season X/banner.jpg If no season folder exists, None is returned """ dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))] season_dir_regex = '^Season\s+(\d+)$' season_dir = None for cur_dir in dir_list: # MediaBrowser 1.x only supports 'Specials' # MediaBrowser 2.x looks to only support 'Season 0' # MediaBrowser 3.x looks to mimic XBMC/Plex support if season == 0 and cur_dir == "Specials": season_dir = cur_dir break match = re.match(season_dir_regex, cur_dir, re.I) if not match: continue cur_season = int(match.group(1)) if cur_season == season: season_dir = cur_dir break if not season_dir: logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) return None logger.log(u"Using " + str(season_dir) + "/banner.jpg as season dir for season " + str(season), logger.DEBUG) return ek.ek(os.path.join, show_obj.location, season_dir, 'banner.jpg') def _show_data(self, show_obj): """ Creates an elementTree XML structure for a MediaBrowser-style series.xml returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ tvdb_lang = show_obj.lang # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) tv_node = etree.Element("Series") try: myShow = t[int(show_obj.tvdbid)] except tvdb_exceptions.tvdb_shownotfound: logger.log(u"Unable to find show with id " + str(show_obj.tvdbid) + " on tvdb, skipping it", logger.ERROR) raise except tvdb_exceptions.tvdb_error: logger.log(u"TVDB is down, can't use its data to make the NFO", logger.ERROR) raise # check for title and id try: if myShow['seriesname'] is None or myShow['seriesname'] == "" or myShow['id'] is None or myShow['id'] == "": logger.log(u"Incomplete info for show with id " + str(show_obj.tvdbid) + " on tvdb, skipping it", logger.ERROR) return False except tvdb_exceptions.tvdb_attributenotfound: logger.log(u"Incomplete info for show with id " + str(show_obj.tvdbid) + " on tvdb, skipping it", logger.ERROR) return False tvdbid = etree.SubElement(tv_node, "id") if myShow['id'] is not None: tvdbid.text = myShow['id'] SeriesName = etree.SubElement(tv_node, "SeriesName") if myShow['seriesname'] is not None: SeriesName.text = myShow['seriesname'] Status = etree.SubElement(tv_node, "Status") if myShow['status'] is not None: Status.text = myShow['status'] Network = etree.SubElement(tv_node, "Network") if myShow['network'] is not None: Network.text = myShow['network'] Airs_Time = etree.SubElement(tv_node, "Airs_Time") if myShow['airs_time'] is not None: Airs_Time.text = myShow['airs_time'] Airs_DayOfWeek = etree.SubElement(tv_node, "Airs_DayOfWeek") if myShow['airs_dayofweek'] is not None: Airs_DayOfWeek.text = myShow['airs_dayofweek'] FirstAired = etree.SubElement(tv_node, "FirstAired") if myShow['firstaired'] is not None: FirstAired.text = myShow['firstaired'] 
ContentRating = etree.SubElement(tv_node, "ContentRating") MPAARating = etree.SubElement(tv_node, "MPAARating") certification = etree.SubElement(tv_node, "certification") if myShow['contentrating'] is not None: ContentRating.text = myShow['contentrating'] MPAARating.text = myShow['contentrating'] certification.text = myShow['contentrating'] MetadataType = etree.SubElement(tv_node, "Type") MetadataType.text = "Series" Overview = etree.SubElement(tv_node, "Overview") if myShow['overview'] is not None: Overview.text = myShow['overview'] PremiereDate = etree.SubElement(tv_node, "PremiereDate") if myShow['firstaired'] is not None: PremiereDate.text = myShow['firstaired'] Rating = etree.SubElement(tv_node, "Rating") if myShow['rating'] is not None: Rating.text = myShow['rating'] ProductionYear = etree.SubElement(tv_node, "ProductionYear") if myShow['firstaired'] is not None: try: year_text = str(datetime.datetime.strptime(myShow['firstaired'], '%Y-%m-%d').year) if year_text: ProductionYear.text = year_text except: pass RunningTime = etree.SubElement(tv_node, "RunningTime") Runtime = etree.SubElement(tv_node, "Runtime") if myShow['runtime'] is not None: RunningTime.text = myShow['runtime'] Runtime.text = myShow['runtime'] IMDB_ID = etree.SubElement(tv_node, "IMDB_ID") IMDB = etree.SubElement(tv_node, "IMDB") IMDbId = etree.SubElement(tv_node, "IMDbId") if myShow['imdb_id'] is not None: IMDB_ID.text = myShow['imdb_id'] IMDB.text = myShow['imdb_id'] IMDbId.text = myShow['imdb_id'] Zap2ItId = etree.SubElement(tv_node, "Zap2ItId") if myShow['zap2it_id'] is not None: Zap2ItId.text = myShow['zap2it_id'] Genres = etree.SubElement(tv_node, "Genres") if myShow["genre"] is not None: for genre in myShow['genre'].split('|'): if genre and genre.strip(): cur_genre = etree.SubElement(Genres, "Genre") cur_genre.text = genre.strip() Genre = etree.SubElement(tv_node, "Genre") if myShow["genre"] is not None: Genre.text = "|".join([x.strip() for x in myShow["genre"].split('|') if x and 
x.strip()]) Studios = etree.SubElement(tv_node, "Studios") Studio = etree.SubElement(Studios, "Studio") if myShow["network"] is not None: Studio.text = myShow['network'] Persons = etree.SubElement(tv_node, "Persons") if myShow["_actors"] is not None: for actor in myShow["_actors"]: cur_actor_name_text = actor['name'] if cur_actor_name_text is not None and cur_actor_name_text.strip(): cur_actor = etree.SubElement(Persons, "Person") cur_actor_name = etree.SubElement(cur_actor, "Name") cur_actor_name.text = cur_actor_name_text.strip() cur_actor_type = etree.SubElement(cur_actor, "Type") cur_actor_type.text = "Actor" cur_actor_role = etree.SubElement(cur_actor, "Role") cur_actor_role_text = actor['role'] if cur_actor_role_text is not None: cur_actor_role.text = cur_actor_role_text helpers.indentXML(tv_node) data = etree.ElementTree(tv_node) return data def _ep_data(self, ep_obj): """ Creates an elementTree XML structure for a MediaBrowser style episode.xml and returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ eps_to_write = [ep_obj] + ep_obj.relatedEps persons_dict = {} persons_dict['Director'] = [] persons_dict['GuestStar'] = [] persons_dict['Writer'] = [] tvdb_lang = ep_obj.show.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) myShow = t[ep_obj.show.tvdbid] except tvdb_exceptions.tvdb_shownotfound, e: raise exceptions.ShowNotFoundException(e.message) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR) return False rootNode = etree.Element("Item") # write an MediaBrowser XML containing info for all matching episodes for curEpToWrite in eps_to_write: try: myEp = myShow[curEpToWrite.season][curEpToWrite.episode] except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound): logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? 
Should I delete from db?") return None if curEpToWrite == ep_obj: # root (or single) episode # default to today's date for specials if firstaired is not set if myEp['firstaired'] is None and ep_obj.season == 0: myEp['firstaired'] = str(datetime.date.fromordinal(1)) if myEp['episodename'] is None or myEp['firstaired'] is None: return None episode = rootNode EpisodeName = etree.SubElement(episode, "EpisodeName") if curEpToWrite.name is not None: EpisodeName.text = curEpToWrite.name else: EpisodeName.text = "" EpisodeNumber = etree.SubElement(episode, "EpisodeNumber") EpisodeNumber.text = str(ep_obj.episode) if ep_obj.relatedEps: EpisodeNumberEnd = etree.SubElement(episode, "EpisodeNumberEnd") EpisodeNumberEnd.text = str(curEpToWrite.episode) SeasonNumber = etree.SubElement(episode, "SeasonNumber") SeasonNumber.text = str(curEpToWrite.season) if not ep_obj.relatedEps: absolute_number = etree.SubElement(episode, "absolute_number") absolute_number.text = myEp['absolute_number'] FirstAired = etree.SubElement(episode, "FirstAired") if curEpToWrite.airdate != datetime.date.fromordinal(1): FirstAired.text = str(curEpToWrite.airdate) else: FirstAired.text = "" MetadataType = etree.SubElement(episode, "Type") MetadataType.text = "Episode" Overview = etree.SubElement(episode, "Overview") if curEpToWrite.description is not None: Overview.text = curEpToWrite.description else: Overview.text = "" if not ep_obj.relatedEps: Rating = etree.SubElement(episode, "Rating") rating_text = myEp['rating'] if rating_text is not None: Rating.text = rating_text IMDB_ID = etree.SubElement(episode, "IMDB_ID") IMDB = etree.SubElement(episode, "IMDB") IMDbId = etree.SubElement(episode, "IMDbId") if myShow['imdb_id'] is not None: IMDB_ID.text = myShow['imdb_id'] IMDB.text = myShow['imdb_id'] IMDbId.text = myShow['imdb_id'] TvDbId = etree.SubElement(episode, "TvDbId") TvDbId.text = str(curEpToWrite.tvdbid) Persons = etree.SubElement(episode, "Persons") Language = etree.SubElement(episode, "Language") 
Language.text = myEp['language'] thumb = etree.SubElement(episode, "filename") # TODO: See what this is needed for.. if its still needed # just write this to the NFO regardless of whether it actually exists or not # note: renaming files after nfo generation will break this, tough luck thumb_text = self.get_episode_thumb_path(ep_obj) if thumb_text: thumb.text = thumb_text else: # append data from (if any) related episodes EpisodeNumberEnd.text = str(curEpToWrite.episode) if curEpToWrite.name: if not EpisodeName.text: EpisodeName.text = curEpToWrite.name else: EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name if curEpToWrite.description: if not Overview.text: Overview.text = curEpToWrite.description else: Overview.text = Overview.text + "\r" + curEpToWrite.description # collect all directors, guest stars and writers if myEp['director']: persons_dict['Director'] += [x.strip() for x in myEp['director'].split('|') if x and x.strip()] if myEp['gueststars']: persons_dict['GuestStar'] += [x.strip() for x in myEp['gueststars'].split('|') if x and x.strip()] if myEp['writer']: persons_dict['Writer'] += [x.strip() for x in myEp['writer'].split('|') if x and x.strip()] # fill in Persons section with collected directors, guest starts and writers for person_type, names in persons_dict.iteritems(): # remove doubles names = list(set(names)) for cur_name in names: Person = etree.SubElement(Persons, "Person") cur_person_name = etree.SubElement(Person, "Name") cur_person_name.text = cur_name cur_person_type = etree.SubElement(Person, "Type") cur_person_type.text = person_type helpers.indentXML(rootNode) data = etree.ElementTree(rootNode) return data # present a standard "interface" from the module metadata_class = MediaBrowserMetadata
21,904
Python
.py
418
38.181818
178
0.571994
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,358
mede8er.py
midgetspy_Sick-Beard/sickbeard/metadata/mede8er.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import sickbeard import mediabrowser from sickbeard import logger, exceptions, helpers from lib.tvdb_api import tvdb_api, tvdb_exceptions from sickbeard.exceptions import ex try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree class Mede8erMetadata(mediabrowser.MediaBrowserMetadata): """ Metadata generation class for Mede8er based on the MediaBrowser. 
The following file structure is used: show_root/series.xml (show metadata) show_root/folder.jpg (poster) show_root/fanart.jpg (fanart) show_root/Season ##/folder.jpg (season thumb) show_root/Season ##/filename.ext (*) show_root/Season ##/filename.xml (episode metadata) show_root/Season ##/filename.jpg (episode thumb) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): mediabrowser.MediaBrowserMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = "Mede8er" self.fanart_name = "fanart.jpg" # web-ui metadata template # self.eg_show_metadata = "series.xml" self.eg_episode_metadata = "Season##\\<i>filename</i>.xml" self.eg_fanart = "fanart.jpg" # self.eg_poster = "folder.jpg" # self.eg_banner = "banner.jpg" self.eg_episode_thumbnails = "Season##\\<i>filename</i>.jpg" # self.eg_season_posters = "Season##\\folder.jpg" # self.eg_season_banners = "Season##\\banner.jpg" # self.eg_season_all_poster = "<i>not supported</i>" # self.eg_season_all_banner = "<i>not supported</i>" def get_episode_file_path(self, ep_obj): return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension) def get_episode_thumb_path(self, ep_obj): return helpers.replaceExtension(ep_obj.location, 'jpg') def _show_data(self, show_obj): """ Creates an elementTree XML structure for a MediaBrowser-style series.xml returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ tvdb_lang = show_obj.lang # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) rootNode = etree.Element("details") tv_node = etree.SubElement(rootNode, "movie") tv_node.attrib["isExtra"] = "false" tv_node.attrib["isSet"] = "false" tv_node.attrib["isTV"] = "true" try: myShow = t[int(show_obj.tvdbid)] except tvdb_exceptions.tvdb_shownotfound: logger.log(u"Unable to find show with id " + str(show_obj.tvdbid) + " on tvdb, skipping it", logger.ERROR) raise except tvdb_exceptions.tvdb_error: logger.log(u"TVDB is down, can't use its data to make the NFO", logger.ERROR) raise # check for title and id try: if myShow['seriesname'] is None or myShow['seriesname'] == "" or myShow['id'] is None or myShow['id'] == "": logger.log(u"Incomplete info for show with id " + str(show_obj.tvdbid) + " on tvdb, skipping it", logger.ERROR) return False except tvdb_exceptions.tvdb_attributenotfound: logger.log(u"Incomplete info for show with id " + str(show_obj.tvdbid) + " on tvdb, skipping it", logger.ERROR) return False SeriesName = etree.SubElement(tv_node, "title") if myShow['seriesname'] is not None: SeriesName.text = myShow['seriesname'] else: SeriesName.text = "" Genres = etree.SubElement(tv_node, "genres") if myShow["genre"] is not None: for genre in myShow['genre'].split('|'): if genre and genre.strip(): cur_genre = etree.SubElement(Genres, "genre") cur_genre.text = genre.strip() FirstAired = etree.SubElement(tv_node, "premiered") if myShow['firstaired'] is not None: FirstAired.text = myShow['firstaired'] year = etree.SubElement(tv_node, "year") if myShow["firstaired"] is not None: try: year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year) if year_text: year.text = year_text 
except: pass if myShow['rating'] is not None: try: rating = int((float(myShow['rating']) * 10)) except ValueError: rating = 0 Rating = etree.SubElement(tv_node, "rating") rating_text = str(rating) if rating_text is not None: Rating.text = rating_text Status = etree.SubElement(tv_node, "status") if myShow['status'] is not None: Status.text = myShow['status'] mpaa = etree.SubElement(tv_node, "mpaa") if myShow["contentrating"] is not None: mpaa.text = myShow["contentrating"] if myShow['imdb_id'] is not None: IMDB_ID = etree.SubElement(tv_node, "id") IMDB_ID.attrib["moviedb"] = "imdb" IMDB_ID.text = myShow['imdb_id'] if myShow['zap2it_id'] is not None: zap2it_id = etree.SubElement(tv_node, "id") zap2it_id.attrib["moviedb"] = "zap2it" zap2it_id.text = myShow['zap2it_id'] tvdbid = etree.SubElement(tv_node, "tvdbid") if myShow['id'] is not None: tvdbid.text = myShow['id'] Runtime = etree.SubElement(tv_node, "runtime") if myShow['runtime'] is not None: Runtime.text = myShow['runtime'] cast = etree.SubElement(tv_node, "cast") if myShow["_actors"] is not None: for actor in myShow['_actors']: cur_actor_name_text = actor['name'] if cur_actor_name_text is not None and cur_actor_name_text.strip(): cur_actor = etree.SubElement(cast, "actor") cur_actor.text = cur_actor_name_text.strip() helpers.indentXML(rootNode) data = etree.ElementTree(rootNode) return data def _ep_data(self, ep_obj): """ Creates an elementTree XML structure for a MediaBrowser style episode.xml and returns the resulting data object. 
show_obj: a TVShow instance to create the NFO for """ eps_to_write = [ep_obj] + ep_obj.relatedEps tvdb_lang = ep_obj.show.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) myShow = t[ep_obj.show.tvdbid] except tvdb_exceptions.tvdb_shownotfound, e: raise exceptions.ShowNotFoundException(e.message) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR) return False rootNode = etree.Element("details") movie = etree.SubElement(rootNode, "movie") movie.attrib["isExtra"] = "false" movie.attrib["isSet"] = "false" movie.attrib["isTV"] = "true" # write an MediaBrowser XML containing info for all matching episodes for curEpToWrite in eps_to_write: try: myEp = myShow[curEpToWrite.season][curEpToWrite.episode] except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound): logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? 
Should I delete from db?") return None if curEpToWrite == ep_obj: # root (or single) episode # default to today's date for specials if firstaired is not set if myEp['firstaired'] is None and ep_obj.season == 0: myEp['firstaired'] = str(datetime.date.fromordinal(1)) if myEp['episodename'] is None or myEp['firstaired'] is None: return None episode = movie EpisodeName = etree.SubElement(episode, "title") if curEpToWrite.name is not None: EpisodeName.text = curEpToWrite.name else: EpisodeName.text = "" SeasonNumber = etree.SubElement(episode, "season") SeasonNumber.text = str(curEpToWrite.season) EpisodeNumber = etree.SubElement(episode, "episode") EpisodeNumber.text = str(ep_obj.episode) year = etree.SubElement(episode, "year") if myShow["firstaired"] is not None: try: year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year) if year_text: year.text = year_text except: pass plot = etree.SubElement(episode, "plot") if myShow["overview"] is not None: plot.text = myShow["overview"] Overview = etree.SubElement(episode, "episodeplot") if curEpToWrite.description is not None: Overview.text = curEpToWrite.description else: Overview.text = "" mpaa = etree.SubElement(episode, "mpaa") if myShow["contentrating"] is not None: mpaa.text = myShow["contentrating"] if not ep_obj.relatedEps: if myEp["rating"] is not None: try: rating = int((float(myEp['rating']) * 10)) except ValueError: rating = 0 Rating = etree.SubElement(episode, "rating") rating_text = str(rating) if rating_text is not None: Rating.text = rating_text director = etree.SubElement(episode, "director") director_text = myEp['director'] if director_text is not None: director.text = director_text credits = etree.SubElement(episode, "credits") credits_text = myEp['writer'] if credits_text is not None: credits.text = credits_text cast = etree.SubElement(episode, "cast") if myShow["_actors"] is not None: for actor in myShow['_actors']: cur_actor_name_text = actor['name'] if cur_actor_name_text is 
not None and cur_actor_name_text.strip(): cur_actor = etree.SubElement(cast, "actor") cur_actor.text = cur_actor_name_text.strip() else: # append data from (if any) related episodes if curEpToWrite.name: if not EpisodeName.text: EpisodeName.text = curEpToWrite.name else: EpisodeName.text = EpisodeName.text + ", " + curEpToWrite.name if curEpToWrite.description: if not Overview.text: Overview.text = curEpToWrite.description else: Overview.text = Overview.text + "\r" + curEpToWrite.description helpers.indentXML(rootNode) data = etree.ElementTree(rootNode) return data # present a standard "interface" from the module metadata_class = Mede8erMetadata
13,969
Python
.py
284
35.323944
177
0.560332
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,359
wdtv.py
midgetspy_Sick-Beard/sickbeard/metadata/wdtv.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import os import re import sickbeard import generic from sickbeard import logger, exceptions, helpers from sickbeard import encodingKludge as ek from lib.tvdb_api import tvdb_api, tvdb_exceptions from sickbeard.exceptions import ex try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree class WDTVMetadata(generic.GenericMetadata): """ Metadata generation class for WDTV The following file structure is used: show_root/folder.jpg (poster) show_root/Season ##/folder.jpg (season thumb) show_root/Season ##/filename.ext (*) show_root/Season ##/filename.metathumb (episode thumb) show_root/Season ##/filename.xml (episode metadata) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): generic.GenericMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = 'WDTV' self._ep_nfo_extension = 'xml' self.poster_name = "folder.jpg" # web-ui metadata template self.eg_show_metadata = "<i>not 
supported</i>" self.eg_episode_metadata = "Season##\\<i>filename</i>.xml" self.eg_fanart = "<i>not supported</i>" self.eg_poster = "folder.jpg" self.eg_banner = "<i>not supported</i>" self.eg_episode_thumbnails = "Season##\\<i>filename</i>.metathumb" self.eg_season_posters = "Season##\\folder.jpg" self.eg_season_banners = "<i>not supported</i>" self.eg_season_all_poster = "<i>not supported</i>" self.eg_season_all_banner = "<i>not supported</i>" # Override with empty methods for unsupported features def retrieveShowMetadata(self, folder): # no show metadata generated, we abort this lookup function return (None, None) def create_show_metadata(self, show_obj): pass def get_show_file_path(self, show_obj): pass def create_fanart(self, show_obj): pass def create_banner(self, show_obj): pass def create_season_banners(self, show_obj): pass def create_season_all_poster(self, show_obj): pass def create_season_all_banner(self, show_obj): pass def get_episode_thumb_path(self, ep_obj): """ Returns the path where the episode thumbnail should be stored. Defaults to the same path as the episode file but with a .metathumb extension. 
ep_obj: a TVEpisode instance for which to create the thumbnail """ if ek.ek(os.path.isfile, ep_obj.location): tbn_filename = helpers.replaceExtension(ep_obj.location, 'metathumb') else: return None return tbn_filename def get_season_poster_path(self, show_obj, season): """ Season thumbs for WDTV go in Show Dir/Season X/folder.jpg If no season folder exists, None is returned """ dir_list = [x for x in ek.ek(os.listdir, show_obj.location) if ek.ek(os.path.isdir, ek.ek(os.path.join, show_obj.location, x))] season_dir_regex = '^Season\s+(\d+)$' season_dir = None for cur_dir in dir_list: if season == 0 and cur_dir == "Specials": season_dir = cur_dir break match = re.match(season_dir_regex, cur_dir, re.I) if not match: continue cur_season = int(match.group(1)) if cur_season == season: season_dir = cur_dir break if not season_dir: logger.log(u"Unable to find a season dir for season " + str(season), logger.DEBUG) return None logger.log(u"Using " + str(season_dir) + "/folder.jpg as season dir for season " + str(season), logger.DEBUG) return ek.ek(os.path.join, show_obj.location, season_dir, 'folder.jpg') def _ep_data(self, ep_obj): """ Creates an elementTree XML structure for a WDTV style episode.xml and returns the resulting data object. 
ep_obj: a TVShow instance to create the NFO for """ eps_to_write = [ep_obj] + ep_obj.relatedEps tvdb_lang = ep_obj.show.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) myShow = t[ep_obj.show.tvdbid] except tvdb_exceptions.tvdb_shownotfound, e: raise exceptions.ShowNotFoundException(e.message) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR) return False rootNode = etree.Element("details") # write an WDTV XML containing info for all matching episodes for curEpToWrite in eps_to_write: try: myEp = myShow[curEpToWrite.season][curEpToWrite.episode] except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound): logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? 
Should I delete from db?") return None if myEp["firstaired"] is None and ep_obj.season == 0: myEp["firstaired"] = str(datetime.date.fromordinal(1)) if myEp["episodename"] is None or myEp["firstaired"] is None: return None if len(eps_to_write) > 1: episode = etree.SubElement(rootNode, "details") else: episode = rootNode # TODO: get right EpisodeID episodeID = etree.SubElement(episode, "id") episodeID.text = str(curEpToWrite.tvdbid) title = etree.SubElement(episode, "title") title.text = ep_obj.prettyName() seriesName = etree.SubElement(episode, "series_name") if myShow["seriesname"] is not None: seriesName.text = myShow["seriesname"] episodeName = etree.SubElement(episode, "episode_name") if curEpToWrite.name is not None: episodeName.text = curEpToWrite.name seasonNumber = etree.SubElement(episode, "season_number") seasonNumber.text = str(curEpToWrite.season) episodeNum = etree.SubElement(episode, "episode_number") episodeNum.text = str(curEpToWrite.episode) firstAired = etree.SubElement(episode, "firstaired") if curEpToWrite.airdate != datetime.date.fromordinal(1): firstAired.text = str(curEpToWrite.airdate) year = etree.SubElement(episode, "year") if myShow["firstaired"] is not None: try: year_text = str(datetime.datetime.strptime(myShow["firstaired"], '%Y-%m-%d').year) if year_text: year.text = year_text except: pass runtime = etree.SubElement(episode, "runtime") if curEpToWrite.season != 0: if myShow["runtime"] is not None: runtime.text = myShow["runtime"] genre = etree.SubElement(episode, "genre") if myShow["genre"] is not None: genre.text = " / ".join([x.strip() for x in myShow["genre"].split('|') if x and x.strip()]) director = etree.SubElement(episode, "director") director_text = myEp['director'] if director_text is not None: director.text = director_text if myShow["_actors"] is not None: for actor in myShow["_actors"]: cur_actor_name_text = actor['name'] if cur_actor_name_text is not None and cur_actor_name_text.strip(): cur_actor = 
etree.SubElement(episode, "actor") cur_actor_name = etree.SubElement(cur_actor, "name") cur_actor_name.text = cur_actor_name_text.strip() cur_actor_role = etree.SubElement(cur_actor, "role") cur_actor_role_text = actor['role'] if cur_actor_role_text is not None: cur_actor_role.text = cur_actor_role_text overview = etree.SubElement(episode, "overview") if curEpToWrite.description is not None: overview.text = curEpToWrite.description # Make it purdy helpers.indentXML(rootNode) data = etree.ElementTree(rootNode) return data # present a standard "interface" from the module metadata_class = WDTVMetadata
10,943
Python
.py
223
35.38565
178
0.574305
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,360
generic.py
midgetspy_Sick-Beard/sickbeard/metadata/generic.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os.path try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree import re import sickbeard from sickbeard import exceptions, helpers from sickbeard.metadata import helpers as metadata_helpers from sickbeard import logger from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex from lib.tvdb_api import tvdb_api, tvdb_exceptions class GenericMetadata(): """ Base class for all metadata providers. Default behavior is meant to mostly follow XBMC 12+ metadata standards. 
Has support for: - show metadata file - episode metadata file - episode thumbnail - show fanart - show poster - show banner - season thumbnails (poster) - season thumbnails (banner) - season all poster - season all banner """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): self.name = "Generic" self._ep_nfo_extension = "nfo" self._show_metadata_filename = "tvshow.nfo" self.fanart_name = "fanart.jpg" self.poster_name = "poster.jpg" self.banner_name = "banner.jpg" self.season_all_poster_name = "season-all-poster.jpg" self.season_all_banner_name = "season-all-banner.jpg" self.show_metadata = show_metadata self.episode_metadata = episode_metadata self.fanart = fanart self.poster = poster self.banner = banner self.episode_thumbnails = episode_thumbnails self.season_posters = season_posters self.season_banners = season_banners self.season_all_poster = season_all_poster self.season_all_banner = season_all_banner def get_config(self): config_list = [self.show_metadata, self.episode_metadata, self.fanart, self.poster, self.banner, self.episode_thumbnails, self.season_posters, self.season_banners, self.season_all_poster, self.season_all_banner] return '|'.join([str(int(x)) for x in config_list]) def get_id(self): return GenericMetadata.makeID(self.name) @staticmethod def makeID(name): name_id = re.sub("[+]", "plus", name) name_id = re.sub("[^\w\d_]", "_", name_id).lower() return name_id def set_config(self, string): config_list = [bool(int(x)) for x in string.split('|')] self.show_metadata = config_list[0] self.episode_metadata = config_list[1] self.fanart = config_list[2] self.poster = config_list[3] self.banner = config_list[4] self.episode_thumbnails = config_list[5] self.season_posters = config_list[6] self.season_banners = config_list[7] self.season_all_poster = config_list[8] 
self.season_all_banner = config_list[9] def _has_show_metadata(self, show_obj): result = ek.ek(os.path.isfile, self.get_show_file_path(show_obj)) logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_episode_metadata(self, ep_obj): result = ek.ek(os.path.isfile, self.get_episode_file_path(ep_obj)) logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_fanart(self, show_obj): result = ek.ek(os.path.isfile, self.get_fanart_path(show_obj)) logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_poster(self, show_obj): result = ek.ek(os.path.isfile, self.get_poster_path(show_obj)) logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_banner(self, show_obj): result = ek.ek(os.path.isfile, self.get_banner_path(show_obj)) logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_episode_thumb(self, ep_obj): location = self.get_episode_thumb_path(ep_obj) result = location is not None and ek.ek(os.path.isfile, location) if location: logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) return result def _has_season_poster(self, show_obj, season): location = self.get_season_poster_path(show_obj, season) result = location is not None and ek.ek(os.path.isfile, location) if location: logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) return result def _has_season_banner(self, show_obj, season): location = self.get_season_banner_path(show_obj, season) result = location is not None and ek.ek(os.path.isfile, location) if location: logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG) return result def _has_season_all_poster(self, show_obj): 
result = ek.ek(os.path.isfile, self.get_season_all_poster_path(show_obj)) logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def _has_season_all_banner(self, show_obj): result = ek.ek(os.path.isfile, self.get_season_all_banner_path(show_obj)) logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG) return result def get_show_file_path(self, show_obj): return ek.ek(os.path.join, show_obj.location, self._show_metadata_filename) def get_episode_file_path(self, ep_obj): return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension) def get_fanart_path(self, show_obj): return ek.ek(os.path.join, show_obj.location, self.fanart_name) def get_poster_path(self, show_obj): return ek.ek(os.path.join, show_obj.location, self.poster_name) def get_banner_path(self, show_obj): return ek.ek(os.path.join, show_obj.location, self.banner_name) def get_episode_thumb_path(self, ep_obj): """ Returns the path where the episode thumbnail should be stored. ep_obj: a TVEpisode instance for which to create the thumbnail """ if ek.ek(os.path.isfile, ep_obj.location): tbn_filename = ep_obj.location.rpartition(".") if tbn_filename[0] == "": tbn_filename = ep_obj.location + "-thumb.jpg" else: tbn_filename = tbn_filename[0] + "-thumb.jpg" else: return None return tbn_filename def get_season_poster_path(self, show_obj, season): """ Returns the full path to the file for a given season poster. show_obj: a TVShow instance for which to generate the path season: a season number to be used for the path. Note that season 0 means specials. 
""" # Our specials thumbnail is, well, special if season == 0: season_poster_filename = 'season-specials' else: season_poster_filename = 'season' + str(season).zfill(2) return ek.ek(os.path.join, show_obj.location, season_poster_filename + '-poster.jpg') def get_season_banner_path(self, show_obj, season): """ Returns the full path to the file for a given season banner. show_obj: a TVShow instance for which to generate the path season: a season number to be used for the path. Note that season 0 means specials. """ # Our specials thumbnail is, well, special if season == 0: season_banner_filename = 'season-specials' else: season_banner_filename = 'season' + str(season).zfill(2) return ek.ek(os.path.join, show_obj.location, season_banner_filename + '-banner.jpg') def get_season_all_poster_path(self, show_obj): return ek.ek(os.path.join, show_obj.location, self.season_all_poster_name) def get_season_all_banner_path(self, show_obj): return ek.ek(os.path.join, show_obj.location, self.season_all_banner_name) def _show_data(self, show_obj): """ This should be overridden by the implementing class. It should provide the content of the show metadata file. """ return None def _ep_data(self, ep_obj): """ This should be overridden by the implementing class. It should provide the content of the episode metadata file. 
""" return None def create_show_metadata(self, show_obj): if self.show_metadata and show_obj and not self._has_show_metadata(show_obj): logger.log(u"Metadata provider " + self.name + " creating show metadata for " + show_obj.name, logger.DEBUG) return self.write_show_file(show_obj) return False def create_episode_metadata(self, ep_obj): if self.episode_metadata and ep_obj and not self._has_episode_metadata(ep_obj): logger.log(u"Metadata provider " + self.name + " creating episode metadata for " + ep_obj.prettyName(), logger.DEBUG) return self.write_ep_file(ep_obj) return False def create_fanart(self, show_obj): if self.fanart and show_obj and not self._has_fanart(show_obj): logger.log(u"Metadata provider " + self.name + " creating fanart for " + show_obj.name, logger.DEBUG) return self.save_fanart(show_obj) return False def create_poster(self, show_obj): if self.poster and show_obj and not self._has_poster(show_obj): logger.log(u"Metadata provider " + self.name + " creating poster for " + show_obj.name, logger.DEBUG) return self.save_poster(show_obj) return False def create_banner(self, show_obj): if self.banner and show_obj and not self._has_banner(show_obj): logger.log(u"Metadata provider " + self.name + " creating banner for " + show_obj.name, logger.DEBUG) return self.save_banner(show_obj) return False def create_episode_thumb(self, ep_obj): if self.episode_thumbnails and ep_obj and not self._has_episode_thumb(ep_obj): logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.prettyName(), logger.DEBUG) return self.save_thumbnail(ep_obj) return False def create_season_posters(self, show_obj): if self.season_posters and show_obj: result = [] for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable if not self._has_season_poster(show_obj, season): logger.log(u"Metadata provider " + self.name + " creating season posters for " + show_obj.name, logger.DEBUG) result = result + [self.save_season_posters(show_obj, 
season)] return all(result) return False def create_season_banners(self, show_obj): if self.season_banners and show_obj: result = [] for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable if not self._has_season_banner(show_obj, season): logger.log(u"Metadata provider " + self.name + " creating season banners for " + show_obj.name, logger.DEBUG) result = result + [self.save_season_banners(show_obj, season)] return all(result) return False def create_season_all_poster(self, show_obj): if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj): logger.log(u"Metadata provider " + self.name + " creating season all poster for " + show_obj.name, logger.DEBUG) return self.save_season_all_poster(show_obj) return False def create_season_all_banner(self, show_obj): if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj): logger.log(u"Metadata provider " + self.name + " creating season all banner for " + show_obj.name, logger.DEBUG) return self.save_season_all_banner(show_obj) return False def _get_episode_thumb_url(self, ep_obj): """ Returns the URL to use for downloading an episode's thumbnail. Uses theTVDB.com data. 
ep_obj: a TVEpisode object for which to grab the thumb URL """ all_eps = [ep_obj] + ep_obj.relatedEps tvdb_lang = ep_obj.show.lang # get a TVDB object try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) tvdb_show_obj = t[ep_obj.show.tvdbid] except tvdb_exceptions.tvdb_shownotfound, e: raise exceptions.ShowNotFoundException(e.message) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + ex(e), logger.ERROR) return None # try all included episodes in case some have thumbs and others don't for cur_ep in all_eps: try: myEp = tvdb_show_obj[cur_ep.season][cur_ep.episode] except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound): logger.log(u"Unable to find episode " + str(cur_ep.season) + "x" + str(cur_ep.episode) + " on tvdb... has it been removed? Should I delete from db?") continue thumb_url = myEp["filename"] if thumb_url: return thumb_url return None def write_show_file(self, show_obj): """ Generates and writes show_obj's metadata under the given path to the filename given by get_show_file_path() show_obj: TVShow object for which to create the metadata path: An absolute or relative path where we should put the file. Note that the file name will be the default show_file_name. Note that this method expects that _show_data will return an ElementTree object. If your _show_data returns data in another format you'll need to override this method. 
""" data = self._show_data(show_obj) if not data: return False nfo_file_path = self.get_show_file_path(show_obj) nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path) try: if not ek.ek(os.path.isdir, nfo_file_dir): logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG) ek.ek(os.makedirs, nfo_file_dir) helpers.chmodAsParent(nfo_file_dir) logger.log(u"Writing show nfo file to " + nfo_file_path, logger.DEBUG) nfo_file = ek.ek(open, nfo_file_path, 'w') if nfo_file_path.endswith('.xml'): nfo_file.write('<?xml version="1.0" encoding="UTF-8"?>\n') data.write(nfo_file, encoding="utf-8") nfo_file.close() helpers.chmodAsParent(nfo_file_path) except IOError, e: logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR) return False return True def write_ep_file(self, ep_obj): """ Generates and writes ep_obj's metadata under the given path with the given filename root. Uses the episode's name with the extension in _ep_nfo_extension. ep_obj: TVEpisode object for which to create the metadata file_name_path: The file name to use for this metadata. Note that the extension will be automatically added based on _ep_nfo_extension. This should include an absolute path. Note that this method expects that _ep_data will return an ElementTree object. If your _ep_data returns data in another format you'll need to override this method. 
""" data = self._ep_data(ep_obj) if not data: return False nfo_file_path = self.get_episode_file_path(ep_obj) nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path) try: if not ek.ek(os.path.isdir, nfo_file_dir): logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG) ek.ek(os.makedirs, nfo_file_dir) helpers.chmodAsParent(nfo_file_dir) logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG) nfo_file = ek.ek(open, nfo_file_path, 'w') if nfo_file_path.endswith('.xml'): nfo_file.write('<?xml version="1.0" encoding="UTF-8"?>\n') data.write(nfo_file, encoding="utf-8") nfo_file.close() helpers.chmodAsParent(nfo_file_path) except IOError, e: logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR) return False return True def save_thumbnail(self, ep_obj): """ Retrieves a thumbnail and saves it to the correct spot. This method should not need to be overridden by implementing classes, changing get_episode_thumb_path and _get_episode_thumb_url should suffice. ep_obj: a TVEpisode object for which to generate a thumbnail """ file_path = self.get_episode_thumb_path(ep_obj) if not file_path: logger.log(u"Unable to find a file path to use for this thumbnail, not generating it", logger.DEBUG) return False thumb_url = self._get_episode_thumb_url(ep_obj) # if we can't find one then give up if not thumb_url: logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG) return False thumb_data = metadata_helpers.getShowImage(thumb_url) result = self._write_image(thumb_data, file_path) if not result: return False for cur_ep in [ep_obj] + ep_obj.relatedEps: cur_ep.hastbn = True return True def save_fanart(self, show_obj, which=None): """ Downloads a fanart image and saves it to the filename specified by fanart_name inside the show's root folder. 
show_obj: a TVShow object for which to download fanart """ # use the default fanart name fanart_path = self.get_fanart_path(show_obj) fanart_data = self._retrieve_show_image('fanart', show_obj, which) if not fanart_data: logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG) return False return self._write_image(fanart_data, fanart_path) def save_poster(self, show_obj, which=None): """ Downloads a poster image and saves it to the filename specified by poster_name inside the show's root folder. show_obj: a TVShow object for which to download a poster """ # use the default poster name poster_path = self.get_poster_path(show_obj) poster_data = self._retrieve_show_image('poster', show_obj, which) if not poster_data: logger.log(u"No show poster image was retrieved, unable to write poster", logger.DEBUG) return False return self._write_image(poster_data, poster_path) def save_banner(self, show_obj, which=None): """ Downloads a banner image and saves it to the filename specified by banner_name inside the show's root folder. show_obj: a TVShow object for which to download a banner """ # use the default banner name banner_path = self.get_banner_path(show_obj) banner_data = self._retrieve_show_image('banner', show_obj, which) if not banner_data: logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG) return False return self._write_image(banner_data, banner_path) def save_season_posters(self, show_obj, season): """ Saves all season posters to disk for the given show. show_obj: a TVShow object for which to save the season thumbs Cycles through all seasons and saves the season posters if possible. This method should not need to be overridden by implementing classes, changing _season_posters_dict and get_season_poster_path should be good enough. """ season_dict = self._season_posters_dict(show_obj, season) result = [] # Returns a nested dictionary of season art with the season # number as primary key. 
It's really overkill but gives the option # to present to user via ui to pick down the road. for cur_season in season_dict: cur_season_art = season_dict[cur_season] if len(cur_season_art) == 0: continue # Just grab whatever's there for now art_id, season_url = cur_season_art.popitem() # @UnusedVariable season_poster_file_path = self.get_season_poster_path(show_obj, cur_season) if not season_poster_file_path: logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season", logger.DEBUG) continue seasonData = metadata_helpers.getShowImage(season_url) if not seasonData: logger.log(u"No season poster data available, skipping this season", logger.DEBUG) continue result = result + [self._write_image(seasonData, season_poster_file_path)] if result: return all(result) else: return False return True def save_season_banners(self, show_obj, season): """ Saves all season banners to disk for the given show. show_obj: a TVShow object for which to save the season thumbs Cycles through all seasons and saves the season banners if possible. This method should not need to be overridden by implementing classes, changing _season_banners_dict and get_season_banner_path should be good enough. """ season_dict = self._season_banners_dict(show_obj, season) result = [] # Returns a nested dictionary of season art with the season # number as primary key. It's really overkill but gives the option # to present to user via ui to pick down the road. 
for cur_season in season_dict: cur_season_art = season_dict[cur_season] if len(cur_season_art) == 0: continue # Just grab whatever's there for now art_id, season_url = cur_season_art.popitem() # @UnusedVariable season_banner_file_path = self.get_season_banner_path(show_obj, cur_season) if not season_banner_file_path: logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season", logger.DEBUG) continue seasonData = metadata_helpers.getShowImage(season_url) if not seasonData: logger.log(u"No season banner data available, skipping this season", logger.DEBUG) continue result = result + [self._write_image(seasonData, season_banner_file_path)] if result: return all(result) else: return False return True def save_season_all_poster(self, show_obj, which=None): # use the default season all poster name poster_path = self.get_season_all_poster_path(show_obj) poster_data = self._retrieve_show_image('poster', show_obj, which) if not poster_data: logger.log(u"No show poster image was retrieved, unable to write season all poster", logger.DEBUG) return False return self._write_image(poster_data, poster_path) def save_season_all_banner(self, show_obj, which=None): # use the default season all banner name banner_path = self.get_season_all_banner_path(show_obj) banner_data = self._retrieve_show_image('banner', show_obj, which) if not banner_data: logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG) return False return self._write_image(banner_data, banner_path) def _write_image(self, image_data, image_path): """ Saves the data in image_data to the location image_path. Returns True/False to represent success or failure. 
image_data: binary image data to write to file image_path: file location to save the image to """ # don't bother overwriting it if ek.ek(os.path.isfile, image_path): logger.log(u"Image already exists, not downloading", logger.DEBUG) return False if not image_data: logger.log(u"Unable to retrieve image, skipping", logger.WARNING) return False image_dir = ek.ek(os.path.dirname, image_path) try: if not ek.ek(os.path.isdir, image_dir): logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG) ek.ek(os.makedirs, image_dir) helpers.chmodAsParent(image_dir) outFile = ek.ek(open, image_path, 'wb') outFile.write(image_data) outFile.close() helpers.chmodAsParent(image_path) except IOError, e: logger.log(u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e), logger.ERROR) return False return True def _retrieve_show_image(self, image_type, show_obj, which=None): """ Gets an image URL from theTVDB.com, downloads it and returns the data. 
image_type: type of image to retrieve (currently supported: fanart, poster, banner) show_obj: a TVShow object to use when searching for the image which: optional, a specific numbered poster to look for Returns: the binary image data if available, or else None """ tvdb_lang = show_obj.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms) tvdb_show_obj = t[show_obj.tvdbid] except (tvdb_exceptions.tvdb_error, IOError), e: logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR) return None if image_type not in ('fanart', 'poster', 'banner'): logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the TVDB object", logger.ERROR) return None image_url = tvdb_show_obj[image_type] image_data = metadata_helpers.getShowImage(image_url, which) return image_data def _season_posters_dict(self, show_obj, season): """ Should return a dict like: result = {<season number>: {1: '<url 1>', 2: <url 2>, ...},} """ # This holds our resulting dictionary of season art result = {} tvdb_lang = show_obj.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms) tvdb_show_obj = t[show_obj.tvdbid] except (tvdb_exceptions.tvdb_error, IOError), e: logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR) return result # if we have no season banners then just finish if 'season' not in tvdb_show_obj['_banners'] or 'season' not in tvdb_show_obj['_banners']['season']: return result # Give us just the normal poster-style season graphics 
seasonsArtObj = tvdb_show_obj['_banners']['season']['season'] # Returns a nested dictionary of season art with the season # number as primary key. It's really overkill but gives the option # to present to user via ui to pick down the road. result[season] = {} # find the correct season in the tvdb object and just copy the dict into our result dict for seasonArtID in seasonsArtObj.keys(): if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en': result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath'] return result def _season_banners_dict(self, show_obj, season): """ Should return a dict like: result = {<season number>: {1: '<url 1>', 2: <url 2>, ...},} """ # This holds our resulting dictionary of season art result = {} tvdb_lang = show_obj.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(banners=True, **ltvdb_api_parms) tvdb_show_obj = t[show_obj.tvdbid] except (tvdb_exceptions.tvdb_error, IOError), e: logger.log(u"Unable to look up show on TVDB, not downloading images: " + ex(e), logger.ERROR) return result # if we have no season banners then just finish if 'season' not in tvdb_show_obj['_banners'] or 'seasonwide' not in tvdb_show_obj['_banners']['season']: return result # Give us just the normal season graphics seasonsArtObj = tvdb_show_obj['_banners']['season']['seasonwide'] # Returns a nested dictionary of season art with the season # number as primary key. It's really overkill but gives the option # to present to user via ui to pick down the road. 
result[season] = {} # find the correct season in the tvdb object and just copy the dict into our result dict for seasonArtID in seasonsArtObj.keys(): if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == 'en': result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath'] return result def retrieveShowMetadata(self, folder): """ Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB. """ empty_return = (None, None) metadata_path = ek.ek(os.path.join, folder, self._show_metadata_filename) if not ek.ek(os.path.isdir, folder) or not ek.ek(os.path.isfile, metadata_path): logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG) return empty_return logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG) try: with ek.ek(open, metadata_path, 'r') as xmlFileObj: showXML = etree.ElementTree(file=xmlFileObj) if showXML.findtext('title') == None or (showXML.findtext('tvdbid') == None and showXML.findtext('id') == None): logger.log(u"Invalid info in tvshow.nfo (missing name or id):" \ + str(showXML.findtext('title')) + " " \ + str(showXML.findtext('tvdbid')) + " " \ + str(showXML.findtext('id'))) return empty_return name = showXML.findtext('title') if showXML.findtext('tvdbid') is not None: tvdb_id = int(showXML.findtext('tvdbid')) elif showXML.findtext('id'): tvdb_id = int(showXML.findtext('id')) else: logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find an ID", logger.WARNING) return empty_return if not tvdb_id: logger.log(u"Invalid tvdb id (" + str(tvdb_id) + "), not using metadata file", logger.WARNING) return empty_return except Exception, e: logger.log(u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e), logger.WARNING) return empty_return return (tvdb_id, name)
35,433
Python
.py
666
41.576577
220
0.617537
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,361
__init__.py
midgetspy_Sick-Beard/sickbeard/metadata/__init__.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. __all__ = ['generic', 'helpers', 'mede8er', 'mediabrowser', 'ps3', 'tivo', 'wdtv', 'xbmc', 'xbmc_12plus'] import sys import mede8er import mediabrowser import ps3 import tivo import wdtv import xbmc import xbmc_12plus def available_generators(): return filter(lambda x: x not in ('generic', 'helpers'), __all__) def _getMetadataModule(name): name = name.lower() prefix = "sickbeard.metadata." if name in __all__ and prefix + name in sys.modules: return sys.modules[prefix + name] else: return None def _getMetadataClass(name): module = _getMetadataModule(name) if not module: return None return module.metadata_class() def get_metadata_generator_dict(): result = {} for cur_generator_id in available_generators(): cur_generator = _getMetadataClass(cur_generator_id) if not cur_generator: continue result[cur_generator.name] = cur_generator return result
1,785
Python
.py
48
32.395833
106
0.701816
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,362
ps3.py
midgetspy_Sick-Beard/sickbeard/metadata/ps3.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import os import generic from sickbeard import encodingKludge as ek class PS3Metadata(generic.GenericMetadata): """ Metadata generation class for Sony PS3. The following file structure is used: show_root/cover.jpg (poster) show_root/Season ##/filename.ext (*) show_root/Season ##/filename.ext.cover.jpg (episode thumb) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): generic.GenericMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = "Sony PS3" self.poster_name = "cover.jpg" # web-ui metadata template self.eg_show_metadata = "<i>not supported</i>" self.eg_episode_metadata = "<i>not supported</i>" self.eg_fanart = "<i>not supported</i>" self.eg_poster = "cover.jpg" self.eg_banner = "<i>not supported</i>" self.eg_episode_thumbnails = "Season##\\<i>filename</i>.ext.cover.jpg" self.eg_season_posters = "<i>not supported</i>" self.eg_season_banners = "<i>not supported</i>" self.eg_season_all_poster = "<i>not supported</i>" 
self.eg_season_all_banner = "<i>not supported</i>" # Override with empty methods for unsupported features def retrieveShowMetadata(self, folder): # no show metadata generated, we abort this lookup function return (None, None) def create_show_metadata(self, show_obj): pass def get_show_file_path(self, show_obj): pass def create_episode_metadata(self, ep_obj): pass def create_fanart(self, show_obj): pass def create_banner(self, show_obj): pass def create_season_posters(self, show_obj): pass def create_season_banners(self, ep_obj): pass def create_season_all_poster(self, show_obj): pass def create_season_all_banner(self, show_obj): pass def get_episode_thumb_path(self, ep_obj): """ Returns the path where the episode thumbnail should be stored. Defaults to the same path as the episode file but with a .cover.jpg extension. ep_obj: a TVEpisode instance for which to create the thumbnail """ if ek.ek(os.path.isfile, ep_obj.location): tbn_filename = ep_obj.location + ".cover.jpg" else: return None return tbn_filename # present a standard "interface" from the module metadata_class = PS3Metadata
4,102
Python
.py
98
30.908163
82
0.586623
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,363
xbmc.py
midgetspy_Sick-Beard/sickbeard/metadata/xbmc.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import xbmc_12plus import os from sickbeard import helpers from sickbeard import encodingKludge as ek class XBMCMetadata(xbmc_12plus.XBMC_12PlusMetadata): """ Metadata generation class for XBMC (legacy). The following file structure is used: show_root/tvshow.nfo (show metadata) show_root/fanart.jpg (fanart) show_root/folder.jpg (poster) show_root/folder.jpg (banner) show_root/Season ##/filename.ext (*) show_root/Season ##/filename.nfo (episode metadata) show_root/Season ##/filename.tbn (episode thumb) show_root/season##.tbn (season posters) show_root/season-all.tbn (season all poster) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): xbmc_12plus.XBMC_12PlusMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = 'XBMC' self.poster_name = self.banner_name = "folder.jpg" self.season_all_poster_name = "season-all.tbn" # web-ui metadata template # self.eg_show_metadata = "tvshow.nfo" # self.eg_episode_metadata = 
"Season##\\<i>filename</i>.nfo" # self.eg_fanart = "fanart.jpg" self.eg_poster = "folder.jpg" self.eg_banner = "folder.jpg" self.eg_episode_thumbnails = "Season##\\<i>filename</i>.tbn" self.eg_season_posters = "season##.tbn" self.eg_season_banners = "<i>not supported</i>" self.eg_season_all_poster = "season-all.tbn" self.eg_season_all_banner = "<i>not supported</i>" # Override with empty methods for unsupported features def create_season_banners(self, ep_obj): pass def create_season_all_banner(self, show_obj): pass def get_episode_thumb_path(self, ep_obj): """ Returns the path where the episode thumbnail should be stored. Defaults to the same path as the episode file but with a .tbn extension. ep_obj: a TVEpisode instance for which to create the thumbnail """ if ek.ek(os.path.isfile, ep_obj.location): tbn_filename = helpers.replaceExtension(ep_obj.location, 'tbn') else: return None return tbn_filename def get_season_poster_path(self, show_obj, season): """ Returns the full path to the file for a given season poster. show_obj: a TVShow instance for which to generate the path season: a season number to be used for the path. Note that season 0 means specials. """ # Our specials thumbnail is, well, special if season == 0: season_poster_filename = 'season-specials' else: season_poster_filename = 'season' + str(season).zfill(2) return ek.ek(os.path.join, show_obj.location, season_poster_filename + '.tbn') # present a standard "interface" from the module metadata_class = XBMCMetadata
4,694
Python
.py
102
33.843137
87
0.578311
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,364
helpers.py
midgetspy_Sick-Beard/sickbeard/metadata/helpers.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from sickbeard import helpers from sickbeard import logger def getShowImage(url, imgNum=None): image_data = None # @UnusedVariable if url is None: return None # if they provided a fanart number try to use it instead if imgNum is not None: tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg" else: tempURL = url logger.log(u"Fetching image from " + tempURL, logger.DEBUG) image_data = helpers.getURL(tempURL) if image_data is None: logger.log(u"There was an error trying to retrieve the image, aborting", logger.ERROR) return None return image_data
1,432
Python
.py
34
37.294118
95
0.708273
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,365
tivo.py
midgetspy_Sick-Beard/sickbeard/metadata/tivo.py
# Author: Nic Wolfe <[email protected]> # Author: Gordon Turner <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import datetime import os import sickbeard from sickbeard import logger, exceptions, helpers from sickbeard.metadata import generic from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex from lib.tvdb_api import tvdb_api, tvdb_exceptions class TIVOMetadata(generic.GenericMetadata): """ Metadata generation class for TIVO The following file structure is used: show_root/Season ##/filename.ext (*) show_root/Season ##/.meta/filename.ext.txt (episode metadata) This class only generates episode specific metadata files, it does NOT generate a default.txt file. 
""" def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): generic.GenericMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = 'TIVO' self._ep_nfo_extension = "txt" # web-ui metadata template self.eg_show_metadata = "<i>not supported</i>" self.eg_episode_metadata = "Season##\\.meta\\<i>filename</i>.ext.txt" self.eg_fanart = "<i>not supported</i>" self.eg_poster = "<i>not supported</i>" self.eg_banner = "<i>not supported</i>" self.eg_episode_thumbnails = "<i>not supported</i>" self.eg_season_posters = "<i>not supported</i>" self.eg_season_banners = "<i>not supported</i>" self.eg_season_all_poster = "<i>not supported</i>" self.eg_season_all_banner = "<i>not supported</i>" # Override with empty methods for unsupported features def retrieveShowMetadata(self, folder): # no show metadata generated, we abort this lookup function return (None, None) def create_show_metadata(self, show_obj): pass def get_show_file_path(self, show_obj): pass def create_fanart(self, show_obj): pass def create_poster(self, show_obj): pass def create_banner(self, show_obj): pass def create_episode_thumb(self, ep_obj): pass def get_episode_thumb_path(self, ep_obj): pass def create_season_posters(self, ep_obj): pass def create_season_banners(self, ep_obj): pass def create_season_all_poster(self, show_obj): pass def create_season_all_banner(self, show_obj): pass # Override generic class def get_episode_file_path(self, ep_obj): """ Returns a full show dir/.meta/episode.txt path for Tivo episode metadata files. Note, that pyTivo requires the metadata filename to include the original extention. 
ie If the episode name is foo.avi, the metadata name is foo.avi.txt ep_obj: a TVEpisode object to get the path for """ if ek.ek(os.path.isfile, ep_obj.location): metadata_file_name = ek.ek(os.path.basename, ep_obj.location) + "." + self._ep_nfo_extension metadata_dir_name = ek.ek(os.path.join, ek.ek(os.path.dirname, ep_obj.location), '.meta') metadata_file_path = ek.ek(os.path.join, metadata_dir_name, metadata_file_name) else: logger.log(u"Episode location doesn't exist: " + str(ep_obj.location), logger.DEBUG) return '' return metadata_file_path def _ep_data(self, ep_obj): """ Creates a key value structure for a Tivo episode metadata file and returns the resulting data object. ep_obj: a TVEpisode instance to create the metadata file for. Lookup the show in http://thetvdb.com/ using the python library: https://github.com/dbr/tvdb_api/ The results are saved in the object myShow. The key values for the tivo metadata file are from: http://pytivo.sourceforge.net/wiki/index.php/Metadata """ data = "" eps_to_write = [ep_obj] + ep_obj.relatedEps tvdb_lang = ep_obj.show.lang try: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() if tvdb_lang and not tvdb_lang == 'en': ltvdb_api_parms['language'] = tvdb_lang t = tvdb_api.Tvdb(actors=True, **ltvdb_api_parms) myShow = t[ep_obj.show.tvdbid] except tvdb_exceptions.tvdb_shownotfound, e: raise exceptions.ShowNotFoundException(str(e)) except tvdb_exceptions.tvdb_error, e: logger.log(u"Unable to connect to TVDB while creating meta files - skipping - " + str(e), logger.ERROR) return False for curEpToWrite in eps_to_write: try: myEp = myShow[curEpToWrite.season][curEpToWrite.episode] except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound): logger.log(u"Unable to find episode " + str(curEpToWrite.season) + "x" + str(curEpToWrite.episode) + " on tvdb... has it been removed? 
Should I delete from db?") return None if myEp["firstaired"] is None and ep_obj.season == 0: myEp["firstaired"] = str(datetime.date.fromordinal(1)) if myEp["episodename"] is None or myEp["firstaired"] is None: return None if myShow["seriesname"] is not None: data += ("title : " + myShow["seriesname"] + "\n") data += ("seriesTitle : " + myShow["seriesname"] + "\n") data += ("episodeTitle : " + curEpToWrite._format_pattern('%Sx%0E %EN') + "\n") # This should be entered for episodic shows and omitted for movies. The standard tivo format is to enter # the season number followed by the episode number for that season. For example, enter 201 for season 2 # episode 01. # This only shows up if you go into the Details from the Program screen. # This seems to disappear once the video is transferred to TiVo. # NOTE: May not be correct format, missing season, but based on description from wiki leaving as is. data += ("episodeNumber : " + str(curEpToWrite.episode) + "\n") # Must be entered as true or false. If true, the year from originalAirDate will be shown in parentheses # after the episode's title and before the description on the Program screen. # FIXME: Hardcode isEpisode to true for now, not sure how to handle movies data += ("isEpisode : true\n") # Write the synopsis of the video here # Micrsoft Word's smartquotes can die in a fire. sanitizedDescription = curEpToWrite.description # Replace double curly quotes sanitizedDescription = sanitizedDescription.replace(u"\u201c", "\"").replace(u"\u201d", "\"") # Replace single curly quotes sanitizedDescription = sanitizedDescription.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u02BC", "'") data += ("description : " + sanitizedDescription + "\n") # Usually starts with "SH" and followed by 6-8 digits. # Tivo uses zap2it for their data, so the series id is the zap2it_id. 
if myShow["zap2it_id"] is not None: data += ("seriesId : " + myShow["zap2it_id"] + "\n") # This is the call sign of the channel the episode was recorded from. if myShow["network"] is not None: data += ("callsign : " + myShow["network"] + "\n") # This must be entered as yyyy-mm-ddThh:mm:ssZ (the t is capitalized and never changes, the Z is also # capitalized and never changes). This is the original air date of the episode. # NOTE: Hard coded the time to T00:00:00Z as we really don't know when during the day the first run happened. if curEpToWrite.airdate != datetime.date.fromordinal(1): data += ("originalAirDate : " + str(curEpToWrite.airdate) + "T00:00:00Z\n") # This shows up at the beginning of the description on the Program screen and on the Details screen. if myShow["actors"]: for actor in myShow["actors"].split('|'): if actor is not None and actor.strip(): data += ("vActor : " + actor.strip() + "\n") # This is shown on both the Program screen and the Details screen. if myEp["rating"] is not None: try: rating = float(myEp['rating']) except ValueError: rating = 0.0 # convert 10 to 4 star rating. 4 * rating / 10 # only whole numbers or half numbers work. multiply by 2, round, divide by 2.0 rating = round(8 * rating / 10) / 2.0 data += ("starRating : " + str(rating) + "\n") # This is shown on both the Program screen and the Details screen. # It uses the standard TV rating system of: TV-Y7, TV-Y, TV-G, TV-PG, TV-14, TV-MA and TV-NR. if myShow["contentrating"]: data += ("tvRating : " + str(myShow["contentrating"]) + "\n") # This field can be repeated as many times as necessary or omitted completely. 
if ep_obj.show.genre: for genre in ep_obj.show.genre.split('|'): if genre and genre.strip(): data += ("vProgramGenre : " + str(genre.strip()) + "\n") # NOTE: The following are metadata keywords are not used # displayMajorNumber # showingBits # displayMinorNumber # colorCode # vSeriesGenre # vGuestStar, vDirector, vExecProducer, vProducer, vWriter, vHost, vChoreographer # partCount # partIndex return data def write_ep_file(self, ep_obj): """ Generates and writes ep_obj's metadata under the given path with the given filename root. Uses the episode's name with the extension in _ep_nfo_extension. ep_obj: TVEpisode object for which to create the metadata file_name_path: The file name to use for this metadata. Note that the extension will be automatically added based on _ep_nfo_extension. This should include an absolute path. """ data = self._ep_data(ep_obj) if not data: return False nfo_file_path = self.get_episode_file_path(ep_obj) nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path) try: if not ek.ek(os.path.isdir, nfo_file_dir): logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG) ek.ek(os.makedirs, nfo_file_dir) helpers.chmodAsParent(nfo_file_dir) logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG) with ek.ek(open, nfo_file_path, 'w') as nfo_file: # Calling encode directly, b/c often descriptions have wonky characters. nfo_file.write(data.encode("utf-8")) helpers.chmodAsParent(nfo_file_path) except EnvironmentError, e: logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e), logger.ERROR) return False return True # present a standard "interface" from the module metadata_class = TIVOMetadata
13,235
Python
.py
250
40.948
177
0.602247
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,366
omgwtfnzbs.py
midgetspy_Sick-Beard/sickbeard/providers/omgwtfnzbs.py
# Author: Jordon Smith <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import generic import sickbeard from sickbeard import tvcache from sickbeard import helpers from sickbeard import classes from sickbeard import logger from sickbeard.exceptions import ex, AuthException from sickbeard import show_name_helpers from datetime import datetime try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree try: import json except ImportError: from lib import simplejson as json class OmgwtfnzbsProvider(generic.NZBProvider): def __init__(self): generic.NZBProvider.__init__(self, "omgwtfnzbs") self.cache = OmgwtfnzbsCache(self) self.url = 'https://omgwtfnzbs.org/' self.supportsBacklog = True def isEnabled(self): return sickbeard.OMGWTFNZBS def _checkAuth(self): if not sickbeard.OMGWTFNZBS_USERNAME or not sickbeard.OMGWTFNZBS_APIKEY: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsed_data, is_XML=True): if parsed_data is None: return self._checkAuth() if is_XML: # provider doesn't return xml on error return True else: parsedJSON = parsed_data if 'notice' in parsedJSON: description_text = parsedJSON.get('notice') if 'information is incorrect' in 
parsedJSON.get('notice'): logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text), logger.DEBUG) raise AuthException("Your authentication credentials for " + self.name + " are incorrect, check your config.") elif 'please try again later' in parsedJSON.get('notice'): logger.log(self.name + u" down for maintenance, aborting", logger.DEBUG) return False elif '0 results matched your terms' in parsedJSON.get('notice'): return True else: logger.log(u"Unknown error given from " + self.name + " : " + str(description_text), logger.DEBUG) return False return True def _get_season_search_strings(self, show, season): return [x for x in show_name_helpers.makeSceneSeasonSearchString(show, season)] def _get_episode_search_strings(self, ep_obj): return [x for x in show_name_helpers.makeSceneSearchString(ep_obj)] def _get_title_and_url(self, item): return (item['release'].replace('_', '.'), item['getnzb']) def _doSearch(self, search, show=None, retention=0): self._checkAuth() params = {'user': sickbeard.OMGWTFNZBS_USERNAME, 'api': sickbeard.OMGWTFNZBS_APIKEY, 'eng': 1, 'nukes': 1, # show nuke info 'catid': '19,20', # SD,HD 'retention': sickbeard.USENET_RETENTION, 'search': search} if retention or not params['retention']: params['retention'] = retention search_url = 'https://api.omgwtfnzbs.org/json/?' 
+ urllib.urlencode(params) logger.log(u"Search url: " + search_url, logger.DEBUG) data = self.getURL(search_url) if not data: logger.log(u"No data returned from " + search_url, logger.ERROR) return [] parsedJSON = helpers.parse_json(data) if parsedJSON is None: logger.log(u"Error trying to load " + self.name + " JSON data", logger.ERROR) return [] if self._checkAuthFromData(parsedJSON, is_XML=False): results = [] for item in parsedJSON: if 'nuked' in item and item['nuked'].startswith('1'): # logger.log(u"Skipping nuked release: " + item['release'], logger.DEBUG) continue if 'release' in item and 'getnzb' in item: results.append(item) return results return [] def findPropers(self, search_date=None): search_terms = ['.PROPER.', '.REPACK.'] results = [] for term in search_terms: for item in self._doSearch(term, retention=4): if 'usenetage' in item: title, url = self._get_title_and_url(item) try: result_date = datetime.fromtimestamp(int(item['usenetage'])) except: result_date = None if result_date: results.append(classes.Proper(title, url, result_date)) return results class OmgwtfnzbsCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) self.minTime = 20 def _getRSSData(self): params = {'user': sickbeard.OMGWTFNZBS_USERNAME, 'api': sickbeard.OMGWTFNZBS_APIKEY, 'eng': 1, 'delay': 30, 'catid': '19,20'} # SD,HD rss_url = 'https://rss.omgwtfnzbs.org/rss-download.php?' + urllib.urlencode(params) logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG) data = self.provider.getURL(rss_url) if not data: logger.log(u"No data returned from " + rss_url, logger.ERROR) return None return data def _checkAuth(self, parsedXML): return self.provider._checkAuthFromData(parsedXML) provider = OmgwtfnzbsProvider()
6,422
Python
.py
140
35.65
134
0.619804
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,367
womble.py
midgetspy_Sick-Beard/sickbeard/providers/womble.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard import generic from sickbeard import helpers from sickbeard import logger from sickbeard import tvcache try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree class WombleProvider(generic.NZBProvider): def __init__(self): generic.NZBProvider.__init__(self, "Womble's Index") self.cache = WombleCache(self) self.url = 'http://newshost.co.za/' def isEnabled(self): return sickbeard.WOMBLE class WombleCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll Womble's Index every 15 minutes max self.minTime = 15 def _getRSSData(self): RSS_data = None xml_element_tree = None for url in [self.provider.url + 'rss/?sec=tv-x264&fr=false', self.provider.url + 'rss/?sec=tv-dvd&fr=false']: logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG) data = self.provider.getURL(url) if data: parsedXML = helpers.parse_xml(data) if parsedXML: if xml_element_tree is None: xml_element_tree = parsedXML else: items = parsedXML.findall('.//item') if items: for item in items: xml_element_tree.append(item) if xml_element_tree is not None: RSS_data = etree.tostring(xml_element_tree) return RSS_data def _translateTitle(self, title): 
return title.replace(' ', '.').replace('_', '.') def _checkAuth(self, data): return data != 'Invalid Link' provider = WombleProvider()
2,626
Python
.py
62
33.354839
118
0.633413
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,368
hdbits.py
midgetspy_Sick-Beard/sickbeard/providers/hdbits.py
# This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import urllib import generic import sickbeard from sickbeard import classes from sickbeard import logger, tvcache, exceptions from sickbeard import helpers from sickbeard.common import Quality from sickbeard.exceptions import ex, AuthException from sickbeard.name_parser.parser import NameParser, InvalidNameException try: import json except ImportError: from lib import simplejson as json class HDBitsProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "HDBits") self.supportsBacklog = True self.cache = HDBitsCache(self) self.url = 'https://hdbits.org' self.search_url = 'https://hdbits.org/api/torrents' self.rss_url = 'https://hdbits.org/api/torrents' self.download_url = 'http://hdbits.org/download.php?' 
def isEnabled(self): return sickbeard.HDBITS def _checkAuth(self): if not sickbeard.HDBITS_USERNAME or not sickbeard.HDBITS_PASSKEY: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsedJSON): if parsedJSON is None: return self._checkAuth() if 'status' in parsedJSON and 'message' in parsedJSON: if parsedJSON.get('status') == 5: logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['message'], logger.DEBUG) raise AuthException("Your authentication credentials for " + self.name + " are incorrect, check your config.") return True def _get_season_search_strings(self, show, season): season_search_string = [self._make_post_data_JSON(show=show, season=season)] return season_search_string def _get_episode_search_strings(self, episode): episode_search_string = [self._make_post_data_JSON(show=episode.show, episode=episode)] return episode_search_string def _get_title_and_url(self, item): title = item['name'] if title: title = title.replace(' ', '.') url = self.download_url + urllib.urlencode({'id': item['id'], 'passkey': sickbeard.HDBITS_PASSKEY}) return (title, url) def _doSearch(self, search_params, show=None): self._checkAuth() logger.log(u"Search url: " + self.search_url + " search_params: " + search_params, logger.DEBUG) data = self.getURL(self.search_url, post_data=search_params) if not data: logger.log(u"No data returned from " + self.search_url, logger.ERROR) return [] parsedJSON = helpers.parse_json(data) if parsedJSON is None: logger.log(u"Error trying to load " + self.name + " JSON data", logger.ERROR) return [] if self._checkAuthFromData(parsedJSON): results = [] if parsedJSON and 'data' in parsedJSON: items = parsedJSON['data'] else: logger.log(u"Resulting JSON from " + self.name + " isn't correct, not parsing it", logger.ERROR) items = [] for item in items: results.append(item) return results def findPropers(self, 
search_date=None): results = [] search_terms = [' proper ', ' repack '] for term in search_terms: for item in self._doSearch(self._make_post_data_JSON(search_term=term)): if item['utadded']: try: result_date = datetime.datetime.fromtimestamp(int(item['utadded'])) except: result_date = None if result_date: if not search_date or result_date > search_date: title, url = self._get_title_and_url(item) results.append(classes.Proper(title, url, result_date)) return results def _make_post_data_JSON(self, show=None, episode=None, season=None, search_term=None): post_data = { 'username': sickbeard.HDBITS_USERNAME, 'passkey': sickbeard.HDBITS_PASSKEY, 'category': [2], # TV Category } if episode: post_data['tvdb'] = { 'id': show.tvdbid, 'season': episode.season, 'episode': episode.episode } if season: post_data['tvdb'] = { 'id': show.tvdbid, 'season': season, } if search_term: post_data['search'] = search_term return json.dumps(post_data) class HDBitsCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll HDBits every 15 minutes max self.minTime = 15 def updateCache(self): if not self.shouldUpdate(): return if self._checkAuth(None): data = self._getRSSData() # As long as we got something from the provider we count it as an update if data: self.setLastUpdate() else: return [] logger.log(u"Clearing " + self.provider.name + " cache and updating with new information") self._clearCache() parsedJSON = helpers.parse_json(data) if parsedJSON is None: logger.log(u"Error trying to load " + self.provider.name + " JSON feed", logger.ERROR) return [] if self._checkAuth(parsedJSON): if parsedJSON and 'data' in parsedJSON: items = parsedJSON['data'] else: logger.log(u"Resulting JSON from " + self.provider.name + " isn't correct, not parsing it", logger.ERROR) return [] for item in items: self._parseItem(item) else: raise exceptions.AuthException("Your authentication info for " + self.provider.name + " is incorrect, check your config") else: 
return [] def _getRSSData(self): return self.provider.getURL(self.provider.rss_url, post_data=self.provider._make_post_data_JSON()) def _parseItem(self, item): (title, url) = self.provider._get_title_and_url(item) if title and url: logger.log(u"Adding item to results: " + title, logger.DEBUG) self._addCacheEntry(title, url) else: logger.log(u"The data returned from the " + self.provider.name + " is incomplete, this result is unusable", logger.ERROR) return def _checkAuth(self, data): return self.provider._checkAuthFromData(data) provider = HDBitsProvider()
7,788
Python
.py
165
35.272727
138
0.599065
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,369
tvtorrents.py
midgetspy_Sick-Beard/sickbeard/providers/tvtorrents.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree import sickbeard import generic from sickbeard.exceptions import ex, AuthException from sickbeard import helpers from sickbeard import logger from sickbeard import tvcache class TvTorrentsProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "TvTorrents") self.supportsBacklog = False self.cache = TvTorrentsCache(self) self.url = 'http://www.tvtorrents.com/' def isEnabled(self): return sickbeard.TVTORRENTS def imageName(self): return 'tvtorrents.png' def _checkAuth(self): if not sickbeard.TVTORRENTS_DIGEST or not sickbeard.TVTORRENTS_HASH: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsedXML): if parsedXML is None: return self._checkAuth() description_text = helpers.get_xml_text(parsedXML.find('.//channel/description')) if "User can't be found" in description_text or "Invalid Hash" in description_text: logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text), logger.DEBUG) raise AuthException(u"Your authentication credentials for " + self.name 
+ " are incorrect, check your config") return True class TvTorrentsCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll TvTorrents every 15 minutes max self.minTime = 15 def _getRSSData(self): # These will be ignored on the serverside. ignore_regex = "all.month|month.of|season[\s\d]*complete" rss_url = self.provider.url + 'RssServlet?digest=' + sickbeard.TVTORRENTS_DIGEST + '&hash=' + sickbeard.TVTORRENTS_HASH + '&fname=true&exclude=(' + ignore_regex + ')' logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG) data = self.provider.getURL(rss_url) if not data: logger.log(u"No data returned from " + rss_url, logger.ERROR) return None return data def _checkAuth(self, parsedXML): return self.provider._checkAuthFromData(parsedXML) provider = TvTorrentsProvider()
3,280
Python
.py
67
41.492537
175
0.69463
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,370
ezrss.py
midgetspy_Sick-Beard/sickbeard/providers/ezrss.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import re try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree import sickbeard import generic from sickbeard.common import Quality from sickbeard import logger from sickbeard import tvcache from sickbeard import helpers class EZRSSProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "EZRSS") self.supportsBacklog = True self.cache = EZRSSCache(self) self.url = 'https://eztv.ag/' def isEnabled(self): return sickbeard.EZRSS def imageName(self): return 'ezrss.png' def getQuality(self, item): filename = helpers.get_xml_text(item.find('{//xmlns.ezrss.it/0.1/}torrent/{//xmlns.ezrss.it/0.1/}fileName')) quality = Quality.nameQuality(filename) return quality def findSeasonResults(self, show, season): results = {} if show.air_by_date: logger.log(self.name + u" doesn't support air-by-date backlog because of limitations on their RSS search.", logger.WARNING) return results results = generic.TorrentProvider.findSeasonResults(self, show, season) return results def _get_season_search_strings(self, show, season=None): params = {} if not show: return params params['show_name'] = helpers.sanitizeSceneName(show.name, ezrss=True).replace('.', ' 
').encode('utf-8') if season is not None: params['season'] = season return [params] def _get_episode_search_strings(self, ep_obj): params = {} if not ep_obj: return params params['show_name'] = helpers.sanitizeSceneName(ep_obj.show.name, ezrss=True).replace('.', ' ').encode('utf-8') if ep_obj.show.air_by_date: params['date'] = str(ep_obj.airdate) else: params['season'] = ep_obj.season params['episode'] = ep_obj.episode return [params] def _doSearch(self, search_params, show=None): params = {"mode": "rss"} if search_params: params.update(search_params) search_url = self.url + 'search/?' + urllib.urlencode(params) logger.log(u"Search string: " + search_url, logger.DEBUG) data = self.getURL(search_url) if not data: logger.log(u"No data returned from " + search_url, logger.ERROR) return [] parsedXML = helpers.parse_xml(data) if parsedXML is None: logger.log(u"Error trying to load " + self.name + " RSS feed", logger.ERROR) return [] items = parsedXML.findall('.//item') results = [] for curItem in items: (title, url) = self._get_title_and_url(curItem) if title and url: logger.log(u"Adding item from RSS to results: " + title, logger.DEBUG) results.append(curItem) else: logger.log(u"The XML returned from the " + self.name + " RSS feed is empty or incomplete, this result is unusable", logger.ERROR) return results def _get_title_and_url(self, item): (title, url) = generic.TorrentProvider._get_title_and_url(self, item) filename = helpers.get_xml_text(item.find('{//xmlns.ezrss.it/0.1/}torrent/{//xmlns.ezrss.it/0.1/}fileName')) if filename: new_title = self._extract_name_from_filename(filename) if new_title: title = new_title logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG) return (title, url) def _extract_name_from_filename(self, filename): name_regex = '(.*?)\.?(\[.*]|\d+\.TPB)\.torrent$' logger.log(u"Comparing " + name_regex + " against " + filename, logger.DEBUG) match = re.match(name_regex, filename, re.I) if match: return match.group(1) 
return None class EZRSSCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll EZRSS every 15 minutes max self.minTime = 15 def _getRSSData(self): rss_url = self.provider.url + 'ezrss.xml' logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG) data = self.provider.getURL(rss_url) if not data: logger.log(u"No data returned from " + rss_url, logger.ERROR) return None return data def _parseItem(self, item): (title, url) = self.provider._get_title_and_url(item) if title and url: logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG) url = self._translateLinkURL(url) self._addCacheEntry(title, url) else: logger.log(u"The XML returned from the " + self.provider.name + " feed is empty or incomplete, this result is unusable", logger.ERROR) return provider = EZRSSProvider()
6,028
Python
.py
132
35.833333
147
0.623568
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,371
torrentleech.py
midgetspy_Sick-Beard/sickbeard/providers/torrentleech.py
# Author: Robert Massa <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is based upon tvtorrents.py. # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard import generic try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree from sickbeard import helpers, logger, tvcache from sickbeard.exceptions import ex, AuthException class TorrentLeechProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "TorrentLeech") self.supportsBacklog = False self.cache = TorrentLeechCache(self) self.url = 'http://www.torrentleech.org/' def isEnabled(self): return sickbeard.TORRENTLEECH def imageName(self): return 'torrentleech.png' def _checkAuth(self): if not sickbeard.TORRENTLEECH_KEY: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsedXML): if parsedXML is None: return self._checkAuth() description_text = helpers.get_xml_text(parsedXML.find('.//channel/item/description')) if "Your RSS key is invalid" in description_text: logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text), logger.DEBUG) raise AuthException(u"Your authentication credentials for " + self.name + " are incorrect, check your config") return 
True class TorrentLeechCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll every 15 minutes self.minTime = 15 def _getRSSData(self): rss_url = 'http://rss.torrentleech.org/' + sickbeard.TORRENTLEECH_KEY logger.log(self.provider.name + u" cache update URL: " + rss_url, logger.DEBUG) data = self.provider.getURL(rss_url) if not data: logger.log(u"No data returned from " + rss_url, logger.ERROR) return None return data def _checkAuth(self, parsedXML): return self.provider._checkAuthFromData(parsedXML) provider = TorrentLeechProvider()
2,908
Python
.py
65
38.953846
126
0.710582
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,372
generic.py
midgetspy_Sick-Beard/sickbeard/providers/generic.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import datetime import os import re import urllib2 import sickbeard from sickbeard import helpers, classes, logger, db from sickbeard.common import Quality, MULTI_EP_RESULT, SEASON_RESULT from sickbeard import tvcache from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex from lib.hachoir_parser import createParser from sickbeard.name_parser.parser import NameParser, InvalidNameException class GenericProvider: NZB = "nzb" TORRENT = "torrent" def __init__(self, name): # these need to be set in the subclass self.providerType = None self.name = name self.url = '' self.supportsBacklog = False self.cache = tvcache.TVCache(self) def getID(self): return GenericProvider.makeID(self.name) @staticmethod def makeID(name): return re.sub("[^\w\d_]", "_", name.strip().lower()) def imageName(self): return self.getID() + '.png' def _checkAuth(self): return def isActive(self): if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS: return self.isEnabled() elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS: return self.isEnabled() else: return False def isEnabled(self): """ This should be overridden and should return the config setting eg. 
sickbeard.MYPROVIDER """ return False def getResult(self, episodes): """ Returns a result of the correct type for this provider """ if self.providerType == GenericProvider.NZB: result = classes.NZBSearchResult(episodes) elif self.providerType == GenericProvider.TORRENT: result = classes.TorrentSearchResult(episodes) else: result = classes.SearchResult(episodes) result.provider = self return result def getURL(self, url, post_data=None, heads=None): """ By default this is just a simple urlopen call but this method should be overridden for providers with special URL requirements (like cookies) """ if post_data: if heads: req = urllib2.Request(url, post_data, heads) else: req = urllib2.Request(url, post_data); else: if heads: req = urllib2.Request(url, headers=heads); else: req = urllib2.Request(url); response = helpers.getURL(req) if response is None: logger.log(u"Error loading " + self.name + " URL: " + url, logger.ERROR) return response def downloadResult(self, result): """ Save the result to disk. """ logger.log(u"Downloading a result from " + self.name + " at " + result.url) data = self.getURL(result.url) if not data: return False # use the appropriate watch folder if self.providerType == GenericProvider.NZB: saveDir = sickbeard.NZB_DIR writeMode = 'w' elif self.providerType == GenericProvider.TORRENT: saveDir = sickbeard.TORRENT_DIR writeMode = 'wb' else: return False # use the result name as the filename file_name = ek.ek(os.path.join, saveDir, helpers.sanitizeFileName(result.name) + '.' 
+ self.providerType) logger.log(u"Saving to " + file_name, logger.DEBUG) try: with open(file_name, writeMode) as fileOut: fileOut.write(data) helpers.chmodAsParent(file_name) except EnvironmentError, e: logger.log(u"Unable to save the file: " + ex(e), logger.ERROR) return False # as long as it's a valid download then consider it a successful snatch return self._verify_download(file_name) def _verify_download(self, file_name=None): """ Checks the saved file to see if it was actually valid, if not then consider the download a failure. """ # primitive verification of torrents, just make sure we didn't get a text file or something if self.providerType == GenericProvider.TORRENT: parser = createParser(file_name) if parser: mime_type = parser._getMimeType() try: parser.stream._input.close() except: pass if mime_type != 'application/x-bittorrent': logger.log(u"Result is not a valid torrent file", logger.WARNING) return False return True def searchRSS(self): self._checkAuth() self.cache.updateCache() return self.cache.findNeededEpisodes() def getQuality(self, item): """ Figures out the quality of the given RSS item node item: An elementtree.ElementTree element representing the <item> tag of the RSS feed Returns a Quality value obtained from the node's data """ (title, url) = self._get_title_and_url(item) # @UnusedVariable quality = Quality.nameQuality(title) return quality def _doSearch(self): return [] def _get_season_search_strings(self, show, season, episode=None): return [] def _get_episode_search_strings(self, ep_obj): return [] def _get_title_and_url(self, item): """ Retrieves the title and URL data from the item XML node item: An elementtree.ElementTree element representing the <item> tag of the RSS feed Returns: A tuple containing two strings representing title and URL respectively """ title = helpers.get_xml_text(item.find('title')) if title: title = title.replace(' ', '.') url = helpers.get_xml_text(item.find('link')) if url: url = url.replace('&amp;', '&') 
return (title, url) def findEpisode(self, episode, manualSearch=False): logger.log(u"Searching " + self.name + " for " + episode.prettyName()) self.cache.updateCache() results = self.cache.searchCache(episode, manualSearch) logger.log(u"Cache results: " + str(results), logger.DEBUG) # if we got some results then use them no matter what. # OR # return anyway unless we're doing a manual search if results or not manualSearch: return results itemList = [] for cur_search_string in self._get_episode_search_strings(episode): itemList += self._doSearch(cur_search_string, show=episode.show) for item in itemList: (title, url) = self._get_title_and_url(item) # parse the file name try: myParser = NameParser() parse_result = myParser.parse(title) except InvalidNameException: logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.WARNING) continue if episode.show.air_by_date: if parse_result.air_date != episode.airdate: logger.log(u"Episode " + title + " didn't air on " + str(episode.airdate) + ", skipping it", logger.DEBUG) continue elif parse_result.season_number != episode.season or episode.episode not in parse_result.episode_numbers: logger.log(u"Episode " + title + " isn't " + str(episode.season) + "x" + str(episode.episode) + ", skipping it", logger.DEBUG) continue quality = self.getQuality(item) if not episode.show.wantEpisode(episode.season, episode.episode, quality, manualSearch): logger.log(u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[quality], logger.DEBUG) continue logger.log(u"Found result " + title + " at " + url, logger.DEBUG) result = self.getResult([episode]) result.url = url result.name = title result.quality = quality results.append(result) return results def findSeasonResults(self, show, season): itemList = [] results = {} for cur_string in self._get_season_search_strings(show, season): itemList += self._doSearch(cur_string) for item in itemList: (title, url) = 
self._get_title_and_url(item) quality = self.getQuality(item) # parse the file name try: myParser = NameParser(False) parse_result = myParser.parse(title) except InvalidNameException: logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.WARNING) continue if not show.air_by_date: # this check is meaningless for non-season searches if (parse_result.season_number != None and parse_result.season_number != season) or (parse_result.season_number == None and season != 1): logger.log(u"The result " + title + " doesn't seem to be a valid episode for season " + str(season) + ", ignoring") continue # we just use the existing info for normal searches actual_season = season actual_episodes = parse_result.episode_numbers else: if not parse_result.air_by_date: logger.log(u"This is supposed to be an air-by-date search but the result " + title + " didn't parse as one, skipping it", logger.DEBUG) continue myDB = db.DBConnection() sql_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE showid = ? 
AND airdate = ?", [show.tvdbid, parse_result.air_date.toordinal()]) if len(sql_results) != 1: logger.log(u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it", logger.WARNING) continue actual_season = int(sql_results[0]["season"]) actual_episodes = [int(sql_results[0]["episode"])] # make sure we want the episode wantEp = True for epNo in actual_episodes: if not show.wantEpisode(actual_season, epNo, quality): wantEp = False break if not wantEp: logger.log(u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[quality], logger.DEBUG) continue logger.log(u"Found result " + title + " at " + url, logger.DEBUG) # make a result object epObj = [] for curEp in actual_episodes: epObj.append(show.getEpisode(actual_season, curEp)) result = self.getResult(epObj) result.url = url result.name = title result.quality = quality if len(epObj) == 1: epNum = epObj[0].episode elif len(epObj) > 1: epNum = MULTI_EP_RESULT logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(parse_result.episode_numbers), logger.DEBUG) elif len(epObj) == 0: epNum = SEASON_RESULT result.extraInfo = [show] logger.log(u"Separating full season result to check for later", logger.DEBUG) if epNum in results: results[epNum].append(result) else: results[epNum] = [result] return results def findPropers(self, search_date=None): results = self.cache.listPropers(search_date) return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time'])) for x in results] class NZBProvider(GenericProvider): def __init__(self, name): GenericProvider.__init__(self, name) self.providerType = GenericProvider.NZB class TorrentProvider(GenericProvider): def __init__(self, name): GenericProvider.__init__(self, name) self.providerType = GenericProvider.TORRENT
13,625
Python
.py
287
35.296167
168
0.597396
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,373
__init__.py
midgetspy_Sick-Beard/sickbeard/providers/__init__.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. __all__ = ['ezrss', 'hdbits', 'tvtorrents', 'torrentleech', 'womble', 'btn', 'omgwtfnzbs' ] import sickbeard from sickbeard import logger from os import sys def sortedProviderList(): initialList = sickbeard.providerList + sickbeard.newznabProviderList providerDict = dict(zip([x.getID() for x in initialList], initialList)) newList = [] # add all modules in the priority list, in order for curModule in sickbeard.PROVIDER_ORDER: if curModule in providerDict: newList.append(providerDict[curModule]) # add any modules that are missing from that list for curModule in providerDict: if providerDict[curModule] not in newList: newList.append(providerDict[curModule]) return newList def makeProviderList(): return [x.provider for x in [getProviderModule(y) for y in __all__] if x] def getNewznabProviderList(data): defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')] providerList = filter(lambda x: x, [makeNewznabProvider(x) for x in data.split('!!!')]) providerDict = dict(zip([x.name for x in providerList], providerList)) for curDefault in defaultList: if not curDefault: continue if curDefault.name not in providerDict: curDefault.default = True providerList.append(curDefault) else: 
providerDict[curDefault.name].default = True providerDict[curDefault.name].name = curDefault.name providerDict[curDefault.name].url = curDefault.url providerDict[curDefault.name].needs_auth = curDefault.needs_auth return filter(lambda x: x, providerList) def makeNewznabProvider(configString): if not configString: return None try: name, url, key, catIDs, enabled = configString.split('|') except ValueError: logger.log(u"Skipping Newznab provider string: '" + configString + "', incorrect format", logger.ERROR) return None newznab = sys.modules['sickbeard.providers.newznab'] newProvider = newznab.NewznabProvider(name, url, key=key, catIDs=catIDs) newProvider.enabled = enabled == '1' return newProvider def getDefaultNewznabProviders(): return 'Sick Beard Index|http://lolo.sickbeard.com/|0|5030,5040|1!!!NZBs.org|http://nzbs.org/||5030,5040,5070,5090|0!!!Usenet-Crawler|https://www.usenet-crawler.com/||5030,5040|0' def getProviderModule(name): name = name.lower() prefix = "sickbeard.providers." if name in __all__ and prefix + name in sys.modules: return sys.modules[prefix + name] else: raise Exception("Can't find " + prefix + name + " in " + repr(sys.modules)) def getProviderClass(providerID): providerMatch = [x for x in sickbeard.providerList + sickbeard.newznabProviderList if x.getID() == providerID] if len(providerMatch) != 1: return None else: return providerMatch[0]
3,918
Python
.py
86
38.034884
184
0.683679
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,374
newznab.py
midgetspy_Sick-Beard/sickbeard/providers/newznab.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import urllib import email.utils import datetime import re import os import time try: import xml.etree.cElementTree as etree except ImportError: import elementtree.ElementTree as etree import sickbeard import generic from sickbeard import helpers, classes, logger, db from sickbeard import scene_exceptions from sickbeard.common import Quality, MULTI_EP_RESULT, SEASON_RESULT from sickbeard import tvcache from sickbeard import encodingKludge as ek from sickbeard.exceptions import ex, AuthException from sickbeard.name_parser.parser import NameParser, InvalidNameException class NewznabProvider(generic.NZBProvider): def __init__(self, name, url, key='', catIDs='5030,5040'): generic.NZBProvider.__init__(self, name) self.cache = NewznabCache(self) self.url = url self.key = key # a 0 in the key spot indicates that no key is needed if self.key == '0': self.needs_auth = False else: self.needs_auth = True if catIDs: self.catIDs = catIDs else: self.catIDs = '5030,5040' self.enabled = True self.supportsBacklog = True self.default = False def configStr(self): return self.name + '|' + self.url + '|' + self.key + '|' + self.catIDs + '|' + str(int(self.enabled)) def imageName(self): if ek.ek(os.path.isfile, ek.ek(os.path.join, sickbeard.PROG_DIR, 
'data', 'images', 'providers', self.getID() + '.png')): return self.getID() + '.png' return 'newznab.png' def isEnabled(self): return self.enabled def findEpisode(self, episode, manualSearch=False): logger.log(u"Searching " + self.name + " for " + episode.prettyName()) self.cache.updateCache() results = self.cache.searchCache(episode, manualSearch) logger.log(u"Cache results: " + str(results), logger.DEBUG) # if we got some results then use them no matter what. # OR # return anyway unless we're doing a manual search if results or not manualSearch: return results itemList = [] for cur_search_string in self._get_episode_search_strings(episode): itemList += self._doSearch(cur_search_string, show=episode.show) # check if shows that we have a tvrage id for returned 0 results # if so, fall back to just searching by query if itemList == [] and episode.show.tvrid != 0: logger.log(u"Unable to find a result on " + self.name + " using tvrage id (" + str(episode.show.tvrid) + "), trying to search by string...", logger.WARNING) for cur_search_string in self._get_episode_search_strings(episode, ignore_tvr=True): itemList += self._doSearch(cur_search_string, show=episode.show) for item in itemList: (title, url) = self._get_title_and_url(item) # parse the file name try: myParser = NameParser() parse_result = myParser.parse(title) except InvalidNameException: logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.WARNING) continue if episode.show.air_by_date: if parse_result.air_date != episode.airdate: logger.log(u"Episode " + title + " didn't air on " + str(episode.airdate) + ", skipping it", logger.DEBUG) continue elif parse_result.season_number != episode.season or episode.episode not in parse_result.episode_numbers: logger.log(u"Episode " + title + " isn't " + str(episode.season) + "x" + str(episode.episode) + ", skipping it", logger.DEBUG) continue quality = self.getQuality(item) if not episode.show.wantEpisode(episode.season, episode.episode, 
quality, manualSearch): logger.log(u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[quality], logger.DEBUG) continue logger.log(u"Found result " + title + " at " + url, logger.DEBUG) result = self.getResult([episode]) result.url = url result.name = title result.quality = quality results.append(result) return results def findSeasonResults(self, show, season): itemList = [] results = {} for cur_string in self._get_season_search_strings(show, season): itemList += self._doSearch(cur_string) # check if shows that we have a tvrage id for returned 0 results # if so, fall back to just searching by query if itemList == [] and show.tvrid != 0: logger.log(u"Unable to find a result on " + self.name + " using tvrage id (" + str(show.tvrid) + "), trying to search by string...", logger.WARNING) for cur_string in self._get_season_search_strings(show, season, ignore_tvr=True): itemList += self._doSearch(cur_string) for item in itemList: (title, url) = self._get_title_and_url(item) quality = self.getQuality(item) # parse the file name try: myParser = NameParser(False) parse_result = myParser.parse(title) except InvalidNameException: logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.WARNING) continue if not show.air_by_date: # this check is meaningless for non-season searches if (parse_result.season_number != None and parse_result.season_number != season) or (parse_result.season_number == None and season != 1): logger.log(u"The result " + title + " doesn't seem to be a valid episode for season " + str(season) + ", ignoring") continue # we just use the existing info for normal searches actual_season = season actual_episodes = parse_result.episode_numbers else: if not parse_result.air_by_date: logger.log(u"This is supposed to be an air-by-date search but the result " + title + " didn't parse as one, skipping it", logger.DEBUG) continue myDB = db.DBConnection() sql_results = myDB.select("SELECT 
season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?", [show.tvdbid, parse_result.air_date.toordinal()]) if len(sql_results) != 1: logger.log(u"Tried to look up the date for the episode " + title + " but the database didn't give proper results, skipping it", logger.WARNING) continue actual_season = int(sql_results[0]["season"]) actual_episodes = [int(sql_results[0]["episode"])] # make sure we want the episode wantEp = True for epNo in actual_episodes: if not show.wantEpisode(actual_season, epNo, quality): wantEp = False break if not wantEp: logger.log(u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[quality], logger.DEBUG) continue logger.log(u"Found result " + title + " at " + url, logger.DEBUG) # make a result object epObj = [] for curEp in actual_episodes: epObj.append(show.getEpisode(actual_season, curEp)) result = self.getResult(epObj) result.url = url result.name = title result.quality = quality if len(epObj) == 1: epNum = epObj[0].episode elif len(epObj) > 1: epNum = MULTI_EP_RESULT logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(parse_result.episode_numbers), logger.DEBUG) elif len(epObj) == 0: epNum = SEASON_RESULT result.extraInfo = [show] logger.log(u"Separating full season result to check for later", logger.DEBUG) if epNum in results: results[epNum].append(result) else: results[epNum] = [result] return results def _get_season_search_strings(self, show, season=None, ignore_tvr=False): if not show: return [{}] to_return = [] # add new query strings for exceptions name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid) + [show.name] for cur_exception in name_exceptions: cur_params = {} # search directly by tvrage id if not ignore_tvr and show.tvrid: cur_params['rid'] = show.tvrid # if we can't then fall back on a very basic name search else: cur_params['q'] = helpers.sanitizeSceneName(cur_exception) if season is not None: # 
air-by-date means &season=2010&q=2010.03, no other way to do it atm if show.air_by_date: cur_params['season'] = season.split('-')[0] if 'q' in cur_params: cur_params['q'] += '.' + season.replace('-', '.') else: cur_params['q'] = season.replace('-', '.') else: cur_params['season'] = season # hack to only add a single result if it's a rageid search if not ('rid' in cur_params and to_return): to_return.append(cur_params) return to_return def _get_episode_search_strings(self, ep_obj, ignore_tvr=False): params = {} if not ep_obj: return [params] # search directly by tvrage id if not ignore_tvr and ep_obj.show.tvrid: params['rid'] = ep_obj.show.tvrid # if we can't then fall back on a very basic name search else: params['q'] = helpers.sanitizeSceneName(ep_obj.show.name) if ep_obj.show.air_by_date: date_str = str(ep_obj.airdate) params['season'] = date_str.partition('-')[0] params['ep'] = date_str.partition('-')[2].replace('-', '/') else: params['season'] = ep_obj.season params['ep'] = ep_obj.episode to_return = [params] # only do exceptions if we are searching by name if 'q' in params: # add new query strings for exceptions name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid) for cur_exception in name_exceptions: # don't add duplicates if cur_exception == ep_obj.show.name: continue cur_return = params.copy() cur_return['q'] = helpers.sanitizeSceneName(cur_exception) to_return.append(cur_return) return to_return def _doGeneralSearch(self, search_string): return self._doSearch({'q': search_string}) def _checkAuth(self): if self.needs_auth and not self.key: logger.log(u"Incorrect authentication credentials for " + self.name + " : " + "API key is missing", logger.DEBUG) raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsedXML): if parsedXML is None: return self._checkAuth() if parsedXML.tag == 'error': code = parsedXML.attrib['code'] if code == '100': 
raise AuthException("Your API key for " + self.name + " is incorrect, check your config.") elif code == '101': raise AuthException("Your account on " + self.name + " has been suspended, contact the administrator.") elif code == '102': raise AuthException("Your account isn't allowed to use the API on " + self.name + ", contact the administrator") elif code == '910': logger.log(u"" + self.name + " currently has their API disabled, probably maintenance?", logger.WARNING) return False else: logger.log(u"Unknown error given from " + self.name + ": " + parsedXML.attrib['description'], logger.ERROR) return False return True def _doSearch(self, search_params, show=None, max_age=0): self._checkAuth() params = {"t": "tvsearch", "maxage": sickbeard.USENET_RETENTION, "limit": 100, "cat": self.catIDs} # if max_age is set, use it, don't allow it to be missing if max_age or not params['maxage']: params['maxage'] = max_age if search_params: params.update(search_params) if self.needs_auth and self.key: params['apikey'] = self.key results = [] offset = total = hits = 0 # hardcoded to stop after a max of 4 hits (400 items) per query while (hits < 4) and (offset == 0 or offset < total): if hits > 0: # sleep for a few seconds to not hammer the site and let cpu rest time.sleep(2) params['offset'] = offset search_url = self.url + 'api?' 
+ urllib.urlencode(params) logger.log(u"Search url: " + search_url, logger.DEBUG) data = self.getURL(search_url) if not data: logger.log(u"No data returned from " + search_url, logger.ERROR) return results # hack this in until it's fixed server side if not data.startswith('<?xml'): data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data parsedXML = helpers.parse_xml(data) if parsedXML is None: logger.log(u"Error trying to load " + self.name + " XML data", logger.ERROR) return results if self._checkAuthFromData(parsedXML): if parsedXML.tag == 'rss': items = [] response_nodes = [] for node in parsedXML.getiterator(): # Collect all items for result parsing if node.tag == "item": items.append(node) # Find response nodes but ignore XML namespacing to # accomodate providers with alternative definitions elif node.tag.split("}", 1)[-1] == "response": response_nodes.append(node) # Verify that one and only one node matches and use it, # return otherwise if len(response_nodes) != 1: logger.log(u"No valid, unique response node was found in the API response", logger.ERROR) return results response = response_nodes[0] else: logger.log(u"Resulting XML from " + self.name + " isn't RSS, not parsing it", logger.ERROR) return results # process the items that we have for curItem in items: (title, url) = self._get_title_and_url(curItem) if title and url: # commenting this out for performance reasons, we see the results when they are added to cache anyways # logger.log(u"Adding item from RSS to results: " + title, logger.DEBUG) results.append(curItem) else: logger.log(u"The XML returned from the " + self.name + " RSS feed is incomplete, this result is unusable", logger.DEBUG) # check to see if our offset matches what was returned, otherwise dont trust their values and just use what we have if offset != int(response.get('offset') or 0): logger.log(u"Newznab provider returned invalid api data, report this to your provider! 
Aborting fetching further results.", logger.WARNING) return results try: total = int(response.get('total') or 0) except AttributeError: logger.log(u"Newznab provider provided invalid total.", logger.WARNING) break # if we have 0 results, just break out otherwise increment and continue if total == 0: break else: offset += 100 hits += 1 return results def findPropers(self, search_date=None): search_terms = ['.proper.', '.repack.'] cache_results = self.cache.listPropers(search_date) results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time'])) for x in cache_results] for term in search_terms: for item in self._doSearch({'q': term}, max_age=4): (title, url) = self._get_title_and_url(item) description_node = item.find('pubDate') description_text = helpers.get_xml_text(description_node) try: # we could probably do dateStr = descriptionStr but we want date in this format date_text = re.search('(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', description_text).group(1) except: date_text = None if not date_text: logger.log(u"Unable to figure out the date for entry " + title + ", skipping it") continue else: result_date = email.utils.parsedate(date_text) if result_date: result_date = datetime.datetime(*result_date[0:6]) if not search_date or result_date > search_date: search_result = classes.Proper(title, url, result_date) results.append(search_result) return results class NewznabCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # only poll newznab providers every 15 minutes max self.minTime = 15 def _getRSSData(self): params = {"t": "tvsearch", "cat": self.provider.catIDs} if self.provider.needs_auth and self.provider.key: params['apikey'] = self.provider.key rss_url = self.provider.url + 'api?' 
+ urllib.urlencode(params) logger.log(self.provider.name + " cache update URL: " + rss_url, logger.DEBUG) data = self.provider.getURL(rss_url) if not data: logger.log(u"No data returned from " + rss_url, logger.ERROR) return None # hack this in until it's fixed server side if data and not data.startswith('<?xml'): data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data return data def _checkAuth(self, parsedXML): return self.provider._checkAuthFromData(parsedXML)
20,812
Python
.py
391
38.70844
169
0.566279
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,375
btn.py
midgetspy_Sick-Beard/sickbeard/providers/btn.py
# coding=utf-8 # Author: Dani�l Heimans # URL: http://code.google.com/p/sickbeard # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard import generic from sickbeard import classes from sickbeard import scene_exceptions from sickbeard import logger from sickbeard import tvcache from sickbeard.helpers import sanitizeSceneName from sickbeard.common import Quality from sickbeard.exceptions import ex, AuthException from lib import jsonrpclib from datetime import datetime import time import socket import math class BTNProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "BTN") self.supportsBacklog = True self.cache = BTNCache(self) self.url = "http://broadcasthe.net" def isEnabled(self): return sickbeard.BTN def imageName(self): return 'btn.png' def _checkAuth(self): if not sickbeard.BTN_API_KEY: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def _checkAuthFromData(self, parsedJSON): if parsedJSON is None: return self._checkAuth() if 'api-error' in parsedJSON: logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['api-error'], logger.DEBUG) raise AuthException("Your authentication credentials for " + self.name + " are incorrect, check your config.") return True def _doSearch(self, search_params, show=None, 
age=0): self._checkAuth() params = {} apikey = sickbeard.BTN_API_KEY # age in seconds if age: params['age'] = "<=" + str(int(age)) if search_params: params.update(search_params) parsedJSON = self._api_call(apikey, params) if not parsedJSON: logger.log(u"No data returned from " + self.name, logger.ERROR) return [] if self._checkAuthFromData(parsedJSON): if 'torrents' in parsedJSON: found_torrents = parsedJSON['torrents'] else: found_torrents = {} # We got something, we know the API sends max 1000 results at a time. # See if there are more than 1000 results for our query, if not we # keep requesting until we've got everything. # max 150 requests per hour so limit at that. Scan every 15 minutes. 60 / 15 = 4. max_pages = 35 results_per_page = 1000 if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page: pages_needed = int(math.ceil(int(parsedJSON['results']) / results_per_page)) if pages_needed > max_pages: pages_needed = max_pages # +1 because range(1,4) = 1, 2, 3 for page in range(1, pages_needed + 1): parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page) # Note that this these are individual requests and might time out individually. This would result in 'gaps' # in the results. There is no way to fix this though. 
if 'torrents' in parsedJSON: found_torrents.update(parsedJSON['torrents']) results = [] for torrentid, torrent_info in found_torrents.iteritems(): # @UnusedVariable (title, url) = self._get_title_and_url(torrent_info) if title and url: results.append(torrent_info) return results return [] def _api_call(self, apikey, params={}, results_per_page=1000, offset=0): server = jsonrpclib.Server('http://api.btnapps.net') parsedJSON = {} try: parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset)) except jsonrpclib.jsonrpc.ProtocolError, error: logger.log(u"JSON-RPC protocol error while accessing " + self.name + ": " + ex(error), logger.ERROR) parsedJSON = {'api-error': ex(error)} return parsedJSON except socket.timeout: logger.log(u"Timeout while accessing " + self.name, logger.WARNING) except socket.error, error: # Note that sometimes timeouts are thrown as socket errors logger.log(u"Socket error while accessing " + self.name + ": " + error[1], logger.ERROR) except Exception, error: errorstring = str(error) if(errorstring.startswith('<') and errorstring.endswith('>')): errorstring = errorstring[1:-1] logger.log(u"Unknown error while accessing " + self.name + ": " + errorstring, logger.ERROR) return parsedJSON def _get_title_and_url(self, parsedJSON): # The BTN API gives a lot of information in response, # however Sick Beard is built mostly around Scene or # release names, which is why we are using them here. if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']: title = parsedJSON['ReleaseName'] else: # If we don't have a release name we need to get creative title = u'' if 'Series' in parsedJSON: title += parsedJSON['Series'] if 'GroupName' in parsedJSON: title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName'] if 'Resolution' in parsedJSON: title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution'] if 'Source' in parsedJSON: title += '.' 
+ parsedJSON['Source'] if title else parsedJSON['Source'] if 'Codec' in parsedJSON: title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec'] if title: title = title.replace(' ', '.') url = None if 'DownloadURL' in parsedJSON: url = parsedJSON['DownloadURL'] if url: # unescaped / is valid in JSON, but it can be escaped url = url.replace("\\/", "/") return (title, url) def _get_season_search_strings(self, show, season=None): if not show: return [{}] search_params = [] name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid) + [show.name] for name in name_exceptions: current_params = {} if show.tvdbid: current_params['tvdb'] = show.tvdbid elif show.tvrid: current_params['tvrage'] = show.tvrid else: # Search by name if we don't have tvdb or tvrage id current_params['series'] = sanitizeSceneName(name) if season is not None: whole_season_params = current_params.copy() partial_season_params = current_params.copy() # Search for entire seasons: no need to do special things for air by date shows whole_season_params['category'] = 'Season' whole_season_params['name'] = 'Season ' + str(season) if whole_season_params not in search_params: search_params.append(whole_season_params) # Search for episodes in the season partial_season_params['category'] = 'Episode' if show.air_by_date: # Search for the year of the air by date show partial_season_params['name'] = str(season.split('-')[0]) + "%" else: # Search for any result which has Sxx in the name partial_season_params['name'] = "S" + str(season).zfill(2) + "%" if partial_season_params not in search_params: search_params.append(partial_season_params) else: if current_params not in search_params: search_params.append(current_params) return search_params def _get_episode_search_strings(self, ep_obj): if not ep_obj: return [{}] search_params = {'category': 'Episode'} if ep_obj.show.tvdbid: search_params['tvdb'] = ep_obj.show.tvdbid elif ep_obj.show.tvrid: search_params['tvrage'] = ep_obj.show.rid else: 
search_params['series'] = sanitizeSceneName(ep_obj.show_name) if ep_obj.show.air_by_date: date_str = str(ep_obj.airdate) # BTN uses dots in dates, we just search for the date since that # combined with the series identifier should result in just one episode search_params['name'] = date_str.replace('-', '.') else: # Do a general name search for the episode, formatted like SXXEYY search_params['name'] = "S%02dE%02d" % (ep_obj.season, ep_obj.episode) to_return = [search_params] # only do scene exceptions if we are searching by name if 'series' in search_params: # add new query string for every exception name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid) for cur_exception in name_exceptions: # don't add duplicates if cur_exception == ep_obj.show.name: continue # copy all other parameters before setting the show name for this exception cur_return = search_params.copy() cur_return['series'] = sanitizeSceneName(cur_exception) to_return.append(cur_return) return to_return def _doGeneralSearch(self, search_string): # 'search' looks as broad is it can find. Can contain episode overview and title for example, # use with caution! 
return self._doSearch({'search': search_string}) def findPropers(self, search_date=None): results = [] search_terms = ['%.proper.%', '%.repack.%'] for term in search_terms: for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60): if item['Time']: try: result_date = datetime.fromtimestamp(float(item['Time'])) except TypeError: result_date = None if result_date: if not search_date or result_date > search_date: title, url = self._get_title_and_url(item) results.append(classes.Proper(title, url, result_date)) return results class BTNCache(tvcache.TVCache): def __init__(self, provider): tvcache.TVCache.__init__(self, provider) # At least 15 minutes between queries self.minTime = 15 def updateCache(self): if not self.shouldUpdate(): return if self._checkAuth(None): data = self._getRSSData() # As long as we got something from the provider we count it as an update if data: self.setLastUpdate() else: return [] logger.log(u"Clearing " + self.provider.name + " cache and updating with new information") self._clearCache() if self._checkAuth(data): # By now we know we've got data and no auth errors, all we need to do is put it in the database for item in data: self._parseItem(item) else: raise AuthException("Your authentication info for " + self.provider.name + " is incorrect, check your config") else: return [] def _getRSSData(self): # Get the torrents uploaded since last check. 
seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple())) # default to 15 minutes seconds_minTime = self.minTime * 60 if seconds_since_last_update < seconds_minTime: seconds_since_last_update = seconds_minTime # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, older things will need to be done through backlog if seconds_since_last_update > 86400: logger.log(u"The last known successful update on " + self.provider.name + " was more than 24 hours ago, only trying to fetch the last 24 hours!", logger.WARNING) seconds_since_last_update = 86400 data = self.provider._doSearch(search_params=None, age=seconds_since_last_update) return data def _parseItem(self, item): (title, url) = self.provider._get_title_and_url(item) if title and url: logger.log(u"Adding item to results: " + title, logger.DEBUG) self._addCacheEntry(title, url) else: logger.log(u"The data returned from the " + self.provider.name + " is incomplete, this result is unusable", logger.ERROR) return def _checkAuth(self, data): return self.provider._checkAuthFromData(data) provider = BTNProvider()
13,919
Python
.py
279
38.132616
173
0.601433
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,376
regexes.py
midgetspy_Sick-Beard/sickbeard/name_parser/regexes.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. # all regexes are case insensitive ep_regexes = [ ('standard_repeat', # Show.Name.S01E02.S01E03.Source.Quality.Etc-Group # Show Name - S01E02 - S01E03 - S01E04 - Ep Name ''' ^(?P<series_name>.+?)[. _-]+ # Show_Name and separator s(?P<season_num>\d+)[. _-]* # S01 and optional separator e(?P<ep_num>\d+) # E02 and separator ([. _-]+s(?P=season_num)[. _-]* # S01 and optional separator e(?P<extra_ep_num>\d+))+ # E03/etc and separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('fov_repeat', # Show.Name.1x02.1x03.Source.Quality.Etc-Group # Show Name - 1x02 - 1x03 - 1x04 - Ep Name ''' ^(?P<series_name>.+?)[. _-]+ # Show_Name and separator (?P<season_num>\d+)x # 1x (?P<ep_num>\d+) # 02 and separator ([. _-]+(?P=season_num)x # 1x (?P<extra_ep_num>\d+))+ # 03/etc and separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. 
_-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('standard', # Show.Name.S01E02.Source.Quality.Etc-Group # Show Name - S01E02 - My Ep Name # Show.Name.S01.E03.My.Ep.Name # Show.Name.S01E02E03.Source.Quality.Etc-Group # Show Name - S01E02-03 - My Ep Name # Show.Name.S01.E02.E03 ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator s(?P<season_num>\d+)[. _-]* # S01 and optional separator e(?P<ep_num>\d+) # E02 and separator (([. _-]*e|-) # linking e/- char (?P<extra_ep_num>(?!(1080|720|480)[pi])\d+))* # additional E03/etc [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('fov', # Show_Name.1x02.Source_Quality_Etc-Group # Show Name - 1x02 - My Ep Name # Show_Name.1x02x03x04.Source_Quality_Etc-Group # Show Name - 1x02-03-04 - My Ep Name ''' ^((?P<series_name>.+?)[\[. _-]+)? # Show_Name and separator (?P<season_num>\d+)x # 1x (?P<ep_num>\d+) # 02 and separator (([. _-]*x|-) # linking x/- char (?P<extra_ep_num> (?!(1080|720|480)[pi])(?!(?<=x)264) # ignore obviously wrong multi-eps \d+))* # additional x03/etc [\]. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('scene_date_format', # Show.Name.2010.11.23.Source.Quality.Etc-Group # Show Name - 2010-11-23 - Ep Name ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator (?P<air_year>\d{4})[. _-]+ # 2010 and separator (?P<air_month>\d{2})[. _-]+ # 11 and separator (?P<air_day>\d{2}) # 23 and separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('stupid', # tpz-abc102 ''' (?P<release_group>.+?)-\w+?[\. ]? 
# tpz-abc (?!264) # don't count x264 (?P<season_num>\d{1,2}) # 1 (?P<ep_num>\d{2})$ # 02 ''' ), ('verbose', # Show Name Season 1 Episode 2 Ep Name ''' ^(?P<series_name>.+?)[. _-]+ # Show Name and separator season[. _-]+ # season and separator (?P<season_num>\d+)[. _-]+ # 1 episode[. _-]+ # episode and separator (?P<ep_num>\d+)[. _-]+ # 02 and separator (?P<extra_info>.+)$ # Source_Quality_Etc- ''' ), ('season_only', # Show.Name.S01.Source.Quality.Etc-Group ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator s(eason[. _-])? # S01/Season 01 (?P<season_num>\d+)[. _-]* # S01 and optional separator [. _-]*((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('no_season_multi_ep', # Show.Name.E02-03 # Show.Name.E02.2010 ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator (e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part (?P<ep_num>(\d+|[ivx]+)) # first ep num ((([. _-]+(and|&|to)[. _-]+)|-) # and/&/to joiner (?P<extra_ep_num>(?!(1080|720|480)[pi])(\d+|[ivx]+))[. _-]) # second ep num ([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('no_season_general', # Show.Name.E23.Test # Show.Name.Part.3.Source.Quality.Etc-Group # Show.Name.Part.1.and.Part.2.Blah-Group ''' ^((?P<series_name>.+?)[. _-]+)? # Show_Name and separator (e(p(isode)?)?|part|pt)[. _-]? # e, ep, episode, or part (?P<ep_num>(\d+|([ivx]+(?=[. _-])))) # first ep num ([. _-]+((and|&|to)[. _-]+)? # and/&/to joiner ((e(p(isode)?)?|part|pt)[. _-]?) # e, ep, episode, or part (?P<extra_ep_num>(?!(1080|720|480)[pi]) (\d+|([ivx]+(?=[. _-]))))[. _-])* # second ep num ([. _-]*(?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. 
_-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ('bare', # Show.Name.102.Source.Quality.Etc-Group ''' ^(?P<series_name>.+?)[. _-]+ # Show_Name and separator (?P<season_num>\d{1,2}) # 1 (?P<ep_num>\d{2}) # 02 and separator ([. _-]+(?P<extra_info>(?!\d{3}[. _-]+)[^-]+) # Source_Quality_Etc- (-(?P<release_group>.+))?)?$ # Group ''' ), ('no_season', # Show Name - 01 - Ep Name # 01 - Ep Name ''' ^((?P<series_name>.+?)(?:[. _-]{2,}|[. _]))? # Show_Name and separator (?P<ep_num>\d{1,2}) # 01 (?:-(?P<extra_ep_num>\d{1,2}))* # 02 [. _-]+((?P<extra_info>.+?) # Source_Quality_Etc- ((?<![. _-])(?<!WEB) # Make sure this is really the release group -(?P<release_group>[^- ]+))?)?$ # Group ''' ), ]
10,424
Python
.py
181
42.165746
106
0.34883
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,377
parser.py
midgetspy_Sick-Beard/sickbeard/name_parser/parser.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import datetime import os.path import re import sickbeard from sickbeard import encodingKludge as ek from sickbeard import helpers from sickbeard import logger from sickbeard.exceptions import ex from sickbeard.name_parser import regexes class NameParser(object): def __init__(self, is_file_name=True): self.is_file_name = is_file_name self.compiled_regexes = [] self._compile_regexes() def clean_series_name(self, series_name): """Cleans up series name by removing any . and _ characters, along with any trailing hyphens. Is basically equivalent to replacing all _ and . 
with a space, but handles decimal numbers in string, for example: >>> cleanRegexedSeriesName("an.example.1.0.test") 'an example 1.0 test' >>> cleanRegexedSeriesName("an_example_1.0_test") 'an example 1.0 test' Stolen from dbr's tvnamer """ series_name = re.sub("(\D)\.(?!\s)(\D)", "\\1 \\2", series_name) series_name = re.sub("(\d)\.(\d{4})", "\\1 \\2", series_name) # if it ends in a year then don't keep the dot series_name = re.sub("(\D)\.(?!\s)", "\\1 ", series_name) series_name = re.sub("\.(?!\s)(\D)", " \\1", series_name) series_name = series_name.replace("_", " ") series_name = re.sub("-$", "", series_name) return series_name.strip() def _compile_regexes(self): for (cur_pattern_name, cur_pattern) in regexes.ep_regexes: try: cur_regex = re.compile(cur_pattern, re.VERBOSE | re.IGNORECASE) except re.error, errormsg: logger.log(u"WARNING: Invalid episode_pattern, %s. %s" % (errormsg, cur_pattern)) else: self.compiled_regexes.append((cur_pattern_name, cur_regex)) def _parse_string(self, name): if not name: return None for (cur_regex_name, cur_regex) in self.compiled_regexes: match = cur_regex.match(name) if not match: continue result = ParseResult(name) result.which_regex = [cur_regex_name] named_groups = match.groupdict().keys() if 'series_name' in named_groups: result.series_name = match.group('series_name') if result.series_name: result.series_name = self.clean_series_name(result.series_name) if 'season_num' in named_groups: tmp_season = int(match.group('season_num')) if cur_regex_name == 'bare' and tmp_season in (19, 20): continue result.season_number = tmp_season if 'ep_num' in named_groups: try: ep_num = self._convert_number(match.group('ep_num')) if 'extra_ep_num' in named_groups and match.group('extra_ep_num'): result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1) else: result.episode_numbers = [ep_num] except ValueError, e: raise InvalidNameException(ex(e)) if 'air_year' in named_groups and 'air_month' in named_groups 
and 'air_day' in named_groups: year = int(match.group('air_year')) month = int(match.group('air_month')) day = int(match.group('air_day')) # make an attempt to detect YYYY-DD-MM formats if month > 12: tmp_month = month month = day day = tmp_month try: result.air_date = datetime.date(year, month, day) except ValueError, e: raise InvalidNameException(ex(e)) result.is_proper = False if 'extra_info' in named_groups: tmp_extra_info = match.group('extra_info') # Check if it's a proper if tmp_extra_info: result.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', tmp_extra_info, re.I) is not None # Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season if tmp_extra_info and cur_regex_name == 'season_only' and re.search(r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I): continue result.extra_info = tmp_extra_info if 'release_group' in named_groups: result.release_group = match.group('release_group') return result return None def _combine_results(self, first, second, attr): # if the first doesn't exist then return the second or nothing if not first: if not second: return None else: return getattr(second, attr) # if the second doesn't exist then return the first if not second: return getattr(first, attr) a = getattr(first, attr) b = getattr(second, attr) # if a is good use it if a is not None or (type(a) == list and len(a)): return a # if not use b (if b isn't set it'll just be default) else: return b def _unicodify(self, obj, encoding="utf-8"): if isinstance(obj, basestring): if not isinstance(obj, unicode): obj = unicode(obj, encoding) return obj def _convert_number(self, number): """ Convert number into an integer. Try force converting into integer first, on error try converting from Roman numerals. 
Args: number: int or representation of a number: string or unicode Returns: integer: int number Raises: ValueError """ try: # try forcing to int integer = int(number) except: # on error try converting from Roman numerals roman_numeral_map = ( ('M', 1000, 3), ('CM', 900, 1), ('D', 500, 1), ('CD', 400, 1), ('C', 100, 3), ('XC', 90, 1), ('L', 50, 1), ('XL', 40, 1), ('X', 10, 3), ('IX', 9, 1), ('V', 5, 1), ('IV', 4, 1), ('I', 1, 3) ) roman_numeral = str(number).upper() integer = 0 index = 0 for numeral, value, max_count in roman_numeral_map: count = 0 while roman_numeral[index:index + len(numeral)] == numeral: count += 1 if count > max_count: raise ValueError('not a roman numeral') integer += value index += len(numeral) if index < len(roman_numeral): raise ValueError('not a roman numeral') return integer def parse(self, name): name = self._unicodify(name) cached = name_parser_cache.get(name) if cached: return cached # break it into parts if there are any (dirname, file name, extension) dir_name, file_name = ek.ek(os.path.split, name) if self.is_file_name: base_file_name = helpers.remove_non_release_groups(helpers.remove_extension(file_name)) else: base_file_name = file_name # use only the direct parent dir dir_name = ek.ek(os.path.basename, dir_name) # set up a result to use final_result = ParseResult(name) # try parsing the file name file_name_result = self._parse_string(base_file_name) # parse the dirname for extra info if needed dir_name_result = self._parse_string(dir_name) # build the ParseResult object final_result.air_date = self._combine_results(file_name_result, dir_name_result, 'air_date') if not final_result.air_date: final_result.season_number = self._combine_results(file_name_result, dir_name_result, 'season_number') final_result.episode_numbers = self._combine_results(file_name_result, dir_name_result, 'episode_numbers') final_result.is_proper = self._combine_results(file_name_result, dir_name_result, 'is_proper') # if the dirname has a release group/show 
name I believe it over the filename final_result.series_name = self._combine_results(dir_name_result, file_name_result, 'series_name') final_result.extra_info = self._combine_results(dir_name_result, file_name_result, 'extra_info') final_result.release_group = self._combine_results(dir_name_result, file_name_result, 'release_group') final_result.which_regex = [] if final_result == file_name_result: final_result.which_regex = file_name_result.which_regex elif final_result == dir_name_result: final_result.which_regex = dir_name_result.which_regex else: if file_name_result: final_result.which_regex += file_name_result.which_regex if dir_name_result: final_result.which_regex += dir_name_result.which_regex # if there's no useful info in it then raise an exception if final_result.season_number is None and not final_result.episode_numbers and final_result.air_date is None and not final_result.series_name: raise InvalidNameException("Unable to parse " + name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace')) name_parser_cache.add(name, final_result) # return it return final_result class ParseResult(object): def __init__(self, original_name, series_name=None, season_number=None, episode_numbers=None, extra_info=None, release_group=None, air_date=None ): self.original_name = original_name self.series_name = series_name self.season_number = season_number if not episode_numbers: self.episode_numbers = [] else: self.episode_numbers = episode_numbers self.extra_info = extra_info self.release_group = release_group self.air_date = air_date self.which_regex = None def __eq__(self, other): if not other: return False if self.series_name != other.series_name: return False if self.season_number != other.season_number: return False if self.episode_numbers != other.episode_numbers: return False if self.extra_info != other.extra_info: return False if self.release_group != other.release_group: return False if self.air_date != other.air_date: return False return True def __str__(self): if 
self.series_name is not None: to_return = self.series_name + u' - ' else: to_return = u'' if self.season_number is not None: to_return += 'S' + str(self.season_number) if self.episode_numbers and len(self.episode_numbers): for e in self.episode_numbers: to_return += 'E' + str(e) if self.air_by_date: to_return += str(self.air_date) if self.extra_info: to_return += ' - ' + self.extra_info if self.release_group: to_return += ' (' + self.release_group + ')' to_return += ' [ABD: ' + str(self.air_by_date) + ']' return to_return.encode('utf-8') def _is_air_by_date(self): if self.season_number is None and len(self.episode_numbers) == 0 and self.air_date: return True return False air_by_date = property(_is_air_by_date) class NameParserCache(object): # TODO: check if the fifo list can be skipped and only use one dict _previous_parsed_list = [] # keep a fifo list of the cached items _previous_parsed = {} _cache_size = 100 def add(self, name, parse_result): self._previous_parsed[name] = parse_result self._previous_parsed_list.append(name) while len(self._previous_parsed_list) > self._cache_size: del_me = self._previous_parsed_list.pop(0) self._previous_parsed.pop(del_me) def get(self, name): if name in self._previous_parsed: logger.log(u"Using cached parse result for: " + name, logger.DEBUG) return self._previous_parsed[name] else: return None name_parser_cache = NameParserCache() class InvalidNameException(Exception): "The given name is not valid"
14,057
Python
.py
296
34.716216
152
0.5669
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,378
__init__.py
midgetspy_Sick-Beard/sickbeard/databases/__init__.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. __all__ = ["mainDB", "cache"]
799
Python
.py
18
42.277778
71
0.739409
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,379
cache_db.py
midgetspy_Sick-Beard/sickbeard/databases/cache_db.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from sickbeard import db # Add new migrations at the bottom of the list; subclass the previous migration. class InitialSchema (db.SchemaUpgrade): def test(self): return self.hasTable("lastUpdate") def execute(self): queries = [ ("CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);",), ("CREATE TABLE db_version (db_version INTEGER);",), ("INSERT INTO db_version (db_version) VALUES (?)", 1), ] for query in queries: if len(query) == 1: self.connection.action(query[0]) else: self.connection.action(query[0], query[1:]) class AddSceneExceptions(InitialSchema): def test(self): return self.hasTable("scene_exceptions") def execute(self): self.connection.action("CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, tvdb_id INTEGER KEY, show_name TEXT, provider TEXT)") class AddSceneNameCache(AddSceneExceptions): def test(self): return self.hasTable("scene_names") def execute(self): self.connection.action("CREATE TABLE scene_names (tvdb_id INTEGER, name TEXT)") class AddSceneExceptionsProvider(AddSceneNameCache): def test(self): return self.hasColumn("scene_exceptions", "provider") def execute(self): if not self.hasColumn("scene_exceptions", "provider"): self.addColumn("scene_exceptions", "provider", data_type='TEXT', 
default='sb_tvdb_scene_exceptions')
2,319
Python
.py
49
40.346939
151
0.686887
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,380
mainDB.py
midgetspy_Sick-Beard/sickbeard/databases/mainDB.py
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard import os.path import sys import re from sickbeard import db, common, helpers, logger from sickbeard import encodingKludge as ek from sickbeard.name_parser.parser import NameParser, InvalidNameException MIN_DB_VERSION = 9 # oldest db version we support migrating from MAX_DB_VERSION = 18 class MainSanityCheck(db.DBSanityCheck): def check(self): self.fix_duplicate_shows() self.fix_duplicate_episodes() self.fix_orphan_episodes() def fix_duplicate_shows(self): sqlResults = self.connection.select("SELECT show_id, tvdb_id, COUNT(tvdb_id) as count FROM tv_shows GROUP BY tvdb_id HAVING count > 1") for cur_duplicate in sqlResults: logger.log(u"Duplicate show detected! tvdb_id: " + str(cur_duplicate["tvdb_id"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG) cur_dupe_results = self.connection.select("SELECT show_id, tvdb_id FROM tv_shows WHERE tvdb_id = ? 
LIMIT ?", [cur_duplicate["tvdb_id"], int(cur_duplicate["count"]) - 1] ) for cur_dupe_id in cur_dupe_results: logger.log(u"Deleting duplicate show with tvdb_id: " + str(cur_dupe_id["tvdb_id"]) + u" show_id: " + str(cur_dupe_id["show_id"])) self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id["show_id"]]) else: logger.log(u"No duplicate show, check passed") def fix_duplicate_episodes(self): sqlResults = self.connection.select("SELECT showid, season, episode, COUNT(showid) as count FROM tv_episodes GROUP BY showid, season, episode HAVING count > 1") for cur_duplicate in sqlResults: logger.log(u"Duplicate episode detected! showid: " + str(cur_duplicate["showid"]) + u" season: " + str(cur_duplicate["season"]) + u" episode: " + str(cur_duplicate["episode"]) + u" count: " + str(cur_duplicate["count"]), logger.DEBUG) cur_dupe_results = self.connection.select("SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? and episode = ? ORDER BY episode_id DESC LIMIT ?", [cur_duplicate["showid"], cur_duplicate["season"], cur_duplicate["episode"], int(cur_duplicate["count"]) - 1] ) for cur_dupe_id in cur_dupe_results: logger.log(u"Deleting duplicate episode with episode_id: " + str(cur_dupe_id["episode_id"])) self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_dupe_id["episode_id"]]) else: logger.log(u"No duplicate episode, check passed") def fix_orphan_episodes(self): sqlResults = self.connection.select("SELECT episode_id, showid, tv_shows.tvdb_id FROM tv_episodes LEFT JOIN tv_shows ON tv_episodes.showid=tv_shows.tvdb_id WHERE tv_shows.tvdb_id is NULL") for cur_orphan in sqlResults: logger.log(u"Orphan episode detected! 
episode_id: " + str(cur_orphan["episode_id"]) + " showid: " + str(cur_orphan["showid"]), logger.DEBUG) logger.log(u"Deleting orphan episode with episode_id: " + str(cur_orphan["episode_id"])) self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_orphan["episode_id"]]) else: logger.log(u"No orphan episodes, check passed") def backupDatabase(version): logger.log(u"Backing up database before upgrade") if not helpers.backupVersionedFile(db.dbFilename(), version): logger.log_error_and_exit(u"Database backup failed, abort upgrading database") else: logger.log(u"Proceeding with upgrade") # ====================== # = Main DB Migrations = # ====================== # Add new migrations at the bottom of the list; subclass the previous migration. # schema is based off v18 - build 507 class InitialSchema (db.SchemaUpgrade): def test(self): return self.hasTable("tv_shows") and self.hasTable("db_version") and self.checkDBVersion() >= MIN_DB_VERSION and self.checkDBVersion() <= MAX_DB_VERSION def execute(self): if not self.hasTable("tv_shows") and not self.hasTable("db_version"): queries = [ "CREATE TABLE db_version (db_version INTEGER);", "CREATE TABLE history (action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT, source TEXT);", "CREATE TABLE info (last_backlog NUMERIC, last_tvdb NUMERIC);", "CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, tvdbid NUMERIC, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT, file_size NUMERIC, release_name TEXT);", "CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, tvr_id NUMERIC, tvr_name TEXT, air_by_date NUMERIC, lang TEXT, last_update_tvdb NUMERIC, 
rls_require_words TEXT, rls_ignore_words TEXT, skip_notices NUMERIC);", "CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes (showid,airdate);", "CREATE INDEX idx_showid ON tv_episodes (showid);", "CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);", "INSERT INTO db_version (db_version) VALUES (18);" ] for query in queries: self.connection.action(query) else: cur_db_version = self.checkDBVersion() if cur_db_version < MIN_DB_VERSION: logger.log_error_and_exit(u"Your database version (" + str(cur_db_version) + ") is too old to migrate from what this version of Sick Beard supports (" + \ str(MIN_DB_VERSION) + ").\n" + \ "Upgrade using a previous version (tag) build 496 to build 501 of Sick Beard first or remove database file to begin fresh." ) if cur_db_version > MAX_DB_VERSION: logger.log_error_and_exit(u"Your database version (" + str(cur_db_version) + ") has been incremented past what this version of Sick Beard supports (" + \ str(MAX_DB_VERSION) + ").\n" + \ "If you have used other forks of Sick Beard, your database may be unusable due to their modifications." ) # included in build 496 (2012-06-28) class AddSizeAndSceneNameFields(InitialSchema): def test(self): return self.checkDBVersion() >= 10 def execute(self): backupDatabase(10) if not self.hasColumn("tv_episodes", "file_size"): self.addColumn("tv_episodes", "file_size") if not self.hasColumn("tv_episodes", "release_name"): self.addColumn("tv_episodes", "release_name", "TEXT", "") ep_results = self.connection.select("SELECT episode_id, location, file_size FROM tv_episodes") logger.log(u"Adding file size to all episodes in DB, please be patient") for cur_ep in ep_results: if not cur_ep["location"]: continue # if there is no size yet then populate it for us if (not cur_ep["file_size"] or not int(cur_ep["file_size"])) and ek.ek(os.path.isfile, cur_ep["location"]): cur_size = ek.ek(os.path.getsize, cur_ep["location"]) self.connection.action("UPDATE tv_episodes SET file_size = ? 
WHERE episode_id = ?", [cur_size, int(cur_ep["episode_id"])]) # check each snatch to see if we can use it to get a release name from history_results = self.connection.select("SELECT * FROM history WHERE provider != -1 ORDER BY date ASC") logger.log(u"Adding release name to all episodes still in history") for cur_result in history_results: # find the associated download, if there isn't one then ignore it download_results = self.connection.select("SELECT resource FROM history WHERE provider = -1 AND showid = ? AND season = ? AND episode = ? AND date > ?", [cur_result["showid"], cur_result["season"], cur_result["episode"], cur_result["date"]]) if not download_results: logger.log(u"Found a snatch in the history for " + cur_result["resource"] + " but couldn't find the associated download, skipping it", logger.DEBUG) continue nzb_name = cur_result["resource"] file_name = ek.ek(os.path.basename, download_results[0]["resource"]) # take the extension off the filename, it's not needed if '.' in file_name: file_name = file_name.rpartition('.')[0] # find the associated episode on disk ep_results = self.connection.select("SELECT episode_id, status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? 
AND location != ''", [cur_result["showid"], cur_result["season"], cur_result["episode"]]) if not ep_results: logger.log(u"The episode " + nzb_name + " was found in history but doesn't exist on disk anymore, skipping", logger.DEBUG) continue # get the status/quality of the existing ep and make sure it's what we expect ep_status, ep_quality = common.Quality.splitCompositeStatus(int(ep_results[0]["status"])) if ep_status != common.DOWNLOADED: continue if ep_quality != int(cur_result["quality"]): continue # make sure this is actually a real release name and not a season pack or something for cur_name in (nzb_name, file_name): logger.log(u"Checking if " + cur_name + " is actually a good release name", logger.DEBUG) try: np = NameParser(False) parse_result = np.parse(cur_name) except InvalidNameException: continue if parse_result.series_name and parse_result.season_number is not None and parse_result.episode_numbers and parse_result.release_group: # if all is well by this point we'll just put the release name into the database self.connection.action("UPDATE tv_episodes SET release_name = ? 
WHERE episode_id = ?", [cur_name, ep_results[0]["episode_id"]]) break # check each snatch to see if we can use it to get a release name from empty_results = self.connection.select("SELECT episode_id, location FROM tv_episodes WHERE release_name = ''") logger.log(u"Adding release name to all episodes with obvious scene filenames") for cur_result in empty_results: ep_file_name = ek.ek(os.path.basename, cur_result["location"]) ep_file_name = os.path.splitext(ep_file_name)[0] # only want to find real scene names here so anything with a space in it is out if ' ' in ep_file_name: continue try: np = NameParser(False) parse_result = np.parse(ep_file_name) except InvalidNameException: continue if not parse_result.release_group: continue logger.log(u"Name " + ep_file_name + " gave release group of " + parse_result.release_group + ", seems valid", logger.DEBUG) self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?", [ep_file_name, cur_result["episode_id"]]) self.incDBVersion() # included in build 497 (2012-10-16) class RenameSeasonFolders(AddSizeAndSceneNameFields): def test(self): return self.checkDBVersion() >= 11 def execute(self): backupDatabase(11) # rename the column self.connection.action("ALTER TABLE tv_shows RENAME TO tmp_tv_shows") self.connection.action("CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, tvr_id NUMERIC, tvr_name TEXT, air_by_date NUMERIC, lang TEXT)") sql = "INSERT INTO tv_shows(show_id, location, show_name, tvdb_id, network, genre, runtime, quality, airs, status, flatten_folders, paused, startyear, tvr_id, tvr_name, air_by_date, lang) SELECT show_id, location, show_name, tvdb_id, network, genre, runtime, quality, airs, status, seasonfolders, paused, startyear, tvr_id, tvr_name, air_by_date, lang FROM tmp_tv_shows" 
self.connection.action(sql) # flip the values to be opposite of what they were before self.connection.action("UPDATE tv_shows SET flatten_folders = 2 WHERE flatten_folders = 1") self.connection.action("UPDATE tv_shows SET flatten_folders = 1 WHERE flatten_folders = 0") self.connection.action("UPDATE tv_shows SET flatten_folders = 0 WHERE flatten_folders = 2") self.connection.action("DROP TABLE tmp_tv_shows") self.incDBVersion() # included in build 500 (2013-05-11) class Add1080pAndRawHDQualities(RenameSeasonFolders): """Add support for 1080p related qualities along with RawHD Quick overview of what the upgrade needs to do: quality | old | new -------------------------- hdwebdl | 1<<3 | 1<<5 hdbluray | 1<<4 | 1<<7 fullhdbluray | 1<<5 | 1<<8 -------------------------- rawhdtv | | 1<<3 fullhdtv | | 1<<4 fullhdwebdl | | 1<<6 """ def test(self): return self.checkDBVersion() >= 12 def _update_status(self, old_status): (status, quality) = common.Quality.splitCompositeStatus(old_status) return common.Quality.compositeStatus(status, self._update_quality(quality)) def _update_quality(self, old_quality): """Update bitwise flags to reflect new quality values Check flag bits (clear old then set their new locations) starting with the highest bits so we dont overwrite data we need later on """ result = old_quality # move fullhdbluray from 1<<5 to 1<<8 if set if(result & (1<<5)): result = result & ~(1<<5) result = result | (1<<8) # move hdbluray from 1<<4 to 1<<7 if set if(result & (1<<4)): result = result & ~(1<<4) result = result | (1<<7) # move hdwebdl from 1<<3 to 1<<5 if set if(result & (1<<3)): result = result & ~(1<<3) result = result | (1<<5) return result def _update_composite_qualities(self, status): """Unpack, Update, Return new quality values Unpack the composite archive/initial values. Update either qualities if needed. Then return the new compsite quality value. 
""" best = (status & (0xffff << 16)) >> 16 initial = status & (0xffff) best = self._update_quality(best) initial = self._update_quality(initial) result = ((best << 16) | initial) return result def execute(self): backupDatabase(12) # update the default quality so we dont grab the wrong qualities after migration -- should have really been a config migration sickbeard.QUALITY_DEFAULT = self._update_composite_qualities(sickbeard.QUALITY_DEFAULT) sickbeard.save_config() # upgrade previous HD to HD720p -- shift previous qualities to new placevalues old_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3], []) new_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL, common.Quality.HDBLURAY], []) # update ANY -- shift existing qualities and add new 1080p qualities, note that rawHD was not added to the ANY template old_any = common.Quality.combineQualities([common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3, common.Quality.UNKNOWN], []) new_any = common.Quality.combineQualities([common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.FULLHDTV, common.Quality.HDWEBDL, common.Quality.FULLHDWEBDL, common.Quality.HDBLURAY, common.Quality.FULLHDBLURAY, common.Quality.UNKNOWN], []) # update qualities (including templates) logger.log(u"[1/4] Updating pre-defined templates and the quality for each show...", logger.MESSAGE) ql = [] shows = self.connection.select("SELECT * FROM tv_shows") for cur_show in shows: if cur_show["quality"] == old_hd: new_quality = new_hd elif cur_show["quality"] == old_any: new_quality = new_any else: new_quality = self._update_composite_qualities(cur_show["quality"]) ql.append(["UPDATE tv_shows SET quality = ? 
WHERE show_id = ?", [new_quality, cur_show["show_id"]]]) self.connection.mass_action(ql) # update status that are are within the old hdwebdl (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768) logger.log(u"[2/4] Updating the status for the episodes within each show...", logger.MESSAGE) ql = [] episodes = self.connection.select("SELECT * FROM tv_episodes WHERE status < 3276800 AND status >= 800") for cur_episode in episodes: ql.append(["UPDATE tv_episodes SET status = ? WHERE episode_id = ?", [self._update_status(cur_episode["status"]), cur_episode["episode_id"]]]) self.connection.mass_action(ql) # make two separate passes through the history since snatched and downloaded (action & quality) may not always coordinate together # update previous history so it shows the correct action logger.log(u"[3/4] Updating history to reflect the correct action...", logger.MESSAGE) ql = [] historyAction = self.connection.select("SELECT * FROM history WHERE action < 3276800 AND action >= 800") for cur_entry in historyAction: ql.append(["UPDATE history SET action = ? WHERE showid = ? AND date = ?", [self._update_status(cur_entry["action"]), cur_entry["showid"], cur_entry["date"]]]) self.connection.mass_action(ql) # update previous history so it shows the correct quality logger.log(u"[4/4] Updating history to reflect the correct quality...", logger.MESSAGE) ql = [] historyQuality = self.connection.select("SELECT * FROM history WHERE quality < 32768 AND quality >= 8") for cur_entry in historyQuality: ql.append(["UPDATE history SET quality = ? WHERE showid = ? 
AND date = ?", [self._update_quality(cur_entry["quality"]), cur_entry["showid"], cur_entry["date"]]]) self.connection.mass_action(ql) self.incDBVersion() # cleanup and reduce db if any previous data was removed logger.log(u"Performing a vacuum on the database.", logger.DEBUG) self.connection.action("VACUUM") # included in build 502 (2013-11-24) class AddShowidTvdbidIndex(Add1080pAndRawHDQualities): """ Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries """ def test(self): return self.checkDBVersion() >= 13 def execute(self): backupDatabase(13) logger.log(u"Check for duplicate shows before adding unique index.") MainSanityCheck(self.connection).fix_duplicate_shows() logger.log(u"Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.") if not self.hasTable("idx_showid"): self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);") if not self.hasTable("idx_tvdb_id"): self.connection.action("CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);") self.incDBVersion() # included in build 502 (2013-11-24) class AddLastUpdateTVDB(AddShowidTvdbidIndex): """ Adding column last_update_tvdb to tv_shows for controlling nightly updates """ def test(self): return self.checkDBVersion() >= 14 def execute(self): backupDatabase(14) logger.log(u"Adding column last_update_tvdb to tvshows") if not self.hasColumn("tv_shows", "last_update_tvdb"): self.addColumn("tv_shows", "last_update_tvdb", default=1) self.incDBVersion() # included in build 504 (2014-04-14) class AddRequireAndIgnoreWords(AddLastUpdateTVDB): """ Adding column rls_require_words and rls_ignore_words to tv_shows """ def test(self): return self.checkDBVersion() >= 15 def execute(self): backupDatabase(15) logger.log(u"Adding column rls_require_words to tvshows") if not self.hasColumn("tv_shows", "rls_require_words"): self.addColumn("tv_shows", "rls_require_words", "TEXT", "") logger.log(u"Adding column rls_ignore_words to tvshows") if not 
self.hasColumn("tv_shows", "rls_ignore_words"): self.addColumn("tv_shows", "rls_ignore_words", "TEXT", "") self.incDBVersion() # included in build 507 (2014-11-16) class CleanupHistoryAndSpecials(AddRequireAndIgnoreWords): """ Cleanup older history entries and set specials from wanted to skipped """ def test(self): return self.checkDBVersion() >= 16 def execute(self): backupDatabase(16) logger.log(u"Setting special episodes status to SKIPPED.") self.connection.action("UPDATE tv_episodes SET status = ? WHERE status = ? AND season = 0", [common.SKIPPED, common.WANTED]) fix_ep_rls_group = [] fix_ep_release_name = [] # re-analyze snatched data logger.log(u"Analyzing history to correct bad data (this could take a moment, be patient)...") history_results = self.connection.select("SELECT * FROM history WHERE action % 100 = 2 ORDER BY date ASC") for cur_result in history_results: # find the associated download, if there isn't one then ignore it download_results = self.connection.select("SELECT * FROM history WHERE action % 100 = 4 AND showid = ? AND season = ? AND episode = ? AND quality = ? AND date > ?", [cur_result["showid"], cur_result["season"], cur_result["episode"], cur_result["quality"], cur_result["date"]]) # only continue if there was a download found (thats newer than the snatched) if not download_results: logger.log(u"Found a snatch in the history for " + cur_result["resource"] + " but couldn't find the associated download, skipping it", logger.DEBUG) continue # take the snatched nzb, clean it up so we can store it for the corresponding tv_episodes entry clean_nzb_name = helpers.remove_non_release_groups(helpers.remove_extension(cur_result["resource"])) # fixed known bad release_group data if download_results[0]["provider"].upper() in ["-1", "RP", "NZBGEEK"] or "." 
in download_results[0]["provider"]: try: np = NameParser(False) parse_result = np.parse(clean_nzb_name) except InvalidNameException: continue # leave off check for episode number so we can update season rip data as well? if parse_result.series_name and parse_result.season_number is not None and parse_result.release_group: fix_ep_rls_group.append(["UPDATE history SET provider = ? WHERE action = ? AND showid = ? AND season = ? AND episode = ? AND quality = ? AND date = ?", \ [parse_result.release_group, download_results[0]["action"], download_results[0]["showid"], download_results[0]["season"], download_results[0]["episode"], download_results[0]["quality"], download_results[0]["date"]] ]) # find the associated episode on disk ep_results = self.connection.select("SELECT episode_id, status, release_name FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND location != ''", [cur_result["showid"], cur_result["season"], cur_result["episode"]]) if not ep_results: logger.log(u"The episode " + cur_result["resource"] + " was found in history but doesn't exist on disk anymore, skipping", logger.DEBUG) continue # skip items that appears to have a 'scene' name already to avoid replacing locally pp/manually moved items match = re.search(".(xvid|x264|h.?264|mpeg-?2)", ep_results[0]["release_name"], re.I) if match: continue # get the status/quality of the existing ep and make sure it's what we expect ep_status, ep_quality = common.Quality.splitCompositeStatus(int(ep_results[0]["status"])) if ep_status != common.DOWNLOADED: continue if ep_quality != int(cur_result["quality"]): continue # take the extension off the filename, it's not needed file_name = ek.ek(os.path.basename, download_results[0]["resource"]) if '.' 
in file_name: file_name = file_name.rpartition('.')[0] # make sure this is actually a real release name and not a season pack or something for cur_name in (clean_nzb_name, file_name): logger.log(u"Checking if " + cur_name + " is actually a good release name", logger.DEBUG) try: np = NameParser(False) parse_result = np.parse(cur_name) except InvalidNameException: continue if parse_result.series_name and parse_result.season_number is not None and parse_result.episode_numbers and parse_result.release_group: # if all is well by this point we'll just put the release name into the database fix_ep_release_name.append(["UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?", [cur_name, ep_results[0]["episode_id"]]]) break logger.log(u"Corrected " + str(len(fix_ep_release_name)) + " release names (" + str(len(fix_ep_rls_group)) + " release groups) out of the " + str(len(history_results)) + " releases analyzed.") if len(fix_ep_rls_group) > 0: self.connection.mass_action(fix_ep_rls_group) if len(fix_ep_release_name) > 0: self.connection.mass_action(fix_ep_release_name) # now cleanup all downloaded release groups in the history fix_ep_rls_group = [] logger.log(u"Analyzing downloaded history release groups...") history_results = self.connection.select("SELECT * FROM history WHERE action % 100 = 4 ORDER BY date ASC") for cur_result in history_results: clean_provider = helpers.remove_non_release_groups(helpers.remove_extension(cur_result["provider"])) # take the data on the left of the _, fixes 'LOL_repost' if clean_provider and "_" in clean_provider: clean_provider = clean_provider.rsplit('_', 1)[0] if clean_provider != cur_result["provider"]: fix_ep_rls_group.append(["UPDATE history SET provider = ? WHERE action = ? AND showid = ? AND season = ? AND episode = ? AND quality = ? 
AND date = ?", \ [clean_provider, cur_result["action"], cur_result["showid"], cur_result["season"], cur_result["episode"], cur_result["quality"], cur_result["date"]] ]) logger.log(u"Corrected " + str(len(fix_ep_rls_group)) + " release groups.") if len(fix_ep_rls_group) > 0: self.connection.mass_action(fix_ep_rls_group) self.incDBVersion() # cleanup and reduce db if any previous data was removed logger.log(u"Performing a vacuum on the database.", logger.DEBUG) self.connection.action("VACUUM") # included in build 507 (2014-11-16) class AddSkipNotifications(CleanupHistoryAndSpecials): """ Adding column skip_notices to tv_shows """ def test(self): return self.checkDBVersion() >= 17 def execute(self): backupDatabase(17) logger.log(u"Adding column skip_notices to tvshows") if not self.hasColumn("tv_shows", "skip_notices"): self.addColumn("tv_shows", "skip_notices") self.incDBVersion() # included in build 507 (2014-11-16) class AddHistorySource(AddSkipNotifications): """ Adding column source to history """ def test(self): return self.checkDBVersion() >= 18 def execute(self): backupDatabase(18) logger.log(u"Adding column source to history") if not self.hasColumn("history", "source"): self.addColumn("history", "source", "TEXT", "") logger.log(u"Analyzing history and setting snatch source...") # set source to nzb by default self.connection.action("UPDATE history SET source = 'nzb' WHERE action % 100 = 2") # set source to torrent where needed set_torrent_source = [] history_results = self.connection.select("SELECT * FROM history WHERE action % 100 = 2 AND provider IN ('BTN', 'HDbits', 'TorrentLeech', 'TvTorrents')") for cur_result in history_results: set_torrent_source.append(["UPDATE history SET source = 'torrent' WHERE action = ? AND date = ? AND showid = ? AND provider = ? 
AND quality = ?", \ [cur_result["action"], cur_result["date"], cur_result["showid"], cur_result["provider"], cur_result["quality"]] ]) if len(set_torrent_source) > 0: self.connection.mass_action(set_torrent_source) self.incDBVersion()
31,970
Python
.py
465
55.630108
422
0.62269
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,381
autoProcessTV.py
midgetspy_Sick-Beard/autoProcessTV/autoProcessTV.py
#!/usr/bin/env python
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import with_statement

import os.path
import sys

# ssl module is only needed for the unverified-context workaround on 2.7.9+
if sys.version_info >= (2, 7, 9):
    import ssl as sslModule

# Try importing Python 2 modules using new names
try:
    import ConfigParser as configparser
    import urllib2
    from urllib import urlencode
# On error import Python 3 modules
except ImportError:
    import configparser
    import urllib.request as urllib2
    from urllib.parse import urlencode

# workaround for broken urllib2 in python 2.6.5: wrong credentials lead to an
# infinite recursion
if sys.version_info >= (2, 6, 5) and sys.version_info < (2, 6, 6):
    class HTTPBasicAuthHandler(urllib2.HTTPBasicAuthHandler):

        def retry_http_basic_auth(self, host, req, realm):
            # don't retry if auth failed
            if req.get_header(self.auth_header, None) is not None:
                return None
            return urllib2.HTTPBasicAuthHandler.retry_http_basic_auth(self, host, req, realm)

else:
    HTTPBasicAuthHandler = urllib2.HTTPBasicAuthHandler


def processEpisode(dir_to_process, org_NZB_name=None):
    """Hand a completed download over to Sick Beard's post-processing.

    Reads connection settings from autoProcessTV.cfg (falling back to
    built-in defaults when the file is absent) and calls the
    home/postprocess/processEpisode web endpoint with the given directory
    and, optionally, the original NZB name.  Exits the process on an
    unreadable config file or an unreachable URL.
    """
    # Built-in defaults, used when no config file is found
    host = "localhost"
    port = "8081"
    username = ""
    password = ""
    ssl = 0
    web_root = "/"

    fallback_url = ("https://" if ssl else "http://") + host + ":" + port + web_root

    # Get values from config_file
    cfg = configparser.RawConfigParser()
    cfg_path = os.path.join(os.path.dirname(sys.argv[0]), "autoProcessTV.cfg")

    if not os.path.isfile(cfg_path):
        print ("ERROR: " + cfg_path + " doesn\'t exist")
        print ("copy /rename " + cfg_path + ".sample and edit\n")
        print ("Trying default url: " + fallback_url + "\n")
    else:
        try:
            print ("Loading config from " + cfg_path + "\n")

            with open(cfg_path, "r") as fp:
                cfg.readfp(fp)

            # Replace default values with config_file values
            host = cfg.get("SickBeard", "host")
            port = cfg.get("SickBeard", "port")
            username = cfg.get("SickBeard", "username")
            password = cfg.get("SickBeard", "password")

            # ssl and web_root are optional keys; keep defaults when missing
            try:
                ssl = int(cfg.get("SickBeard", "ssl"))
            except (configparser.NoOptionError, ValueError):
                pass

            try:
                web_root = cfg.get("SickBeard", "web_root")
                # normalize so the value is always "/...-slash-terminated/"
                if not web_root.startswith("/"):
                    web_root = "/" + web_root
                if not web_root.endswith("/"):
                    web_root = web_root + "/"
            except configparser.NoOptionError:
                pass

        except EnvironmentError:
            # sys.exc_info keeps this compatible with very old Pythons
            err = sys.exc_info()[1]
            print ("Could not read configuration file: " + str(err))
            # There was a config_file, don't use default values but exit
            sys.exit(1)

    query = {'quiet': 1, 'dir': dir_to_process}
    if org_NZB_name is not None:
        query['nzbName'] = org_NZB_name

    scheme = "https://" if ssl else "http://"
    url = scheme + host + ":" + port + web_root + "home/postprocess/processEpisode?" + urlencode(query)

    print ("Opening URL: " + url)

    try:
        auth_store = urllib2.HTTPPasswordMgrWithDefaultRealm()
        auth_store.add_password(None, url, username, password)
        auth_handler = HTTPBasicAuthHandler(auth_store)

        if sys.version_info >= (2, 7, 9):
            # skip certificate verification on 2.7.9+ where it became the default
            opener = urllib2.build_opener(auth_handler, urllib2.HTTPSHandler(context=sslModule._create_unverified_context()))
        else:
            opener = urllib2.build_opener(auth_handler)
        urllib2.install_opener(opener)

        # echo Sick Beard's response line by line
        for line in opener.open(url).readlines():
            if line:
                print (line.strip())

    except IOError:
        err = sys.exc_info()[1]
        print ("Unable to open URL: " + str(err))
        sys.exit(1)


if __name__ == "__main__":
    print ("This module is supposed to be used as import in other scripts and not run standalone.")
    print ("Use sabToSickBeard instead.")
    sys.exit(1)
5,003
Python
.py
122
33.483607
120
0.633643
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,382
hellaToSickBeard.py
midgetspy_Sick-Beard/autoProcessTV/hellaToSickBeard.py
#!/usr/bin/env python
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.

# Glue script for HellaVCR: forwards a finished recording to Sick Beard's
# post-processing via the shared autoProcessTV helper module.

import sys

try:
    import autoProcessTV
# catch only ImportError: a bare "except:" also swallowed SystemExit and
# KeyboardInterrupt, and hid unrelated errors raised while loading the module
except ImportError:
    print ("Can't import autoProcessTV.py, make sure it's in the same folder as " + sys.argv[0])
    sys.exit(1)

# HellaVCR invokes this script with the directory as argv[3] and the
# original NZB name as argv[2]
if len(sys.argv) < 4:
    print ("No folder supplied - is this being called from HellaVCR?")
    sys.exit(1)
else:
    autoProcessTV.processEpisode(sys.argv[3], sys.argv[2])
1,112
Python
.py
29
36.310345
96
0.747681
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,383
sabToSickBeard.py
midgetspy_Sick-Beard/autoProcessTV/sabToSickBeard.py
#!/usr/bin/env python
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.

# Glue script for SABnzbd: forwards a finished download to Sick Beard's
# post-processing via the shared autoProcessTV helper module.

import sys

try:
    import autoProcessTV
# catch only ImportError: a bare "except:" also swallowed SystemExit and
# KeyboardInterrupt, and hid unrelated errors raised while loading the module
except ImportError:
    print ("Can't import autoProcessTV.py, make sure it's in the same folder as " + sys.argv[0])
    sys.exit(1)

# SABnzbd user script parameters - see: http://wiki.sabnzbd.org/user-scripts
# 0  sys.argv[0] is the name of this script

# 1  The final directory of the job (full path)
if len(sys.argv) < 2:
    print ("No folder supplied - is this being called from SABnzbd?")
    sys.exit(1)
else:
    download_final_dir = sys.argv[1]

# Remaining parameters are optional; default to None when SABnzbd omits them.
# 2  The original name of the NZB file
org_NZB_name = sys.argv[2] if len(sys.argv) > 3 else None

# 3  Clean version of the job name (no path info and ".nzb" removed)
clean_NZB_file = sys.argv[3] if len(sys.argv) > 4 else None

# 4  Indexer's report number (if supported)
indexer_report = sys.argv[4] if len(sys.argv) > 5 else None

# 5  User-defined category
sab_user_category = sys.argv[5] if len(sys.argv) > 6 else None

# 6  Group that the NZB was posted in e.g. alt.binaries.x
group_NZB = sys.argv[6] if len(sys.argv) > 7 else None

# 7  Status of post processing. 0 = OK, 1=failed verification, 2=failed unpack, 3=1+2
sab_post_processing_status = sys.argv[7] if len(sys.argv) > 8 else None

# Only final_dir and org_NZB_name are being used to process episodes
autoProcessTV.processEpisode(download_final_dir, org_NZB_name)
2,089
Python
.py
46
43.586957
96
0.739773
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,384
profilehooks.py
midgetspy_Sick-Beard/lib/profilehooks.py
""" Profiling hooks This module contains a couple of decorators (`profile` and `coverage`) that can be used to wrap functions and/or methods to produce profiles and line coverage reports. There's a third convenient decorator (`timecall`) that measures the duration of function execution without the extra profiling overhead. Usage example (Python 2.4 or newer):: from profilehooks import profile, coverage @profile # or @coverage def fn(n): if n < 2: return 1 else: return n * fn(n-1) print fn(42) Usage example (Python 2.3 or older):: from profilehooks import profile, coverage def fn(n): if n < 2: return 1 else: return n * fn(n-1) # Now wrap that function in a decorator fn = profile(fn) # or coverage(fn) print fn(42) Reports for all thusly decorated functions will be printed to sys.stdout on program termination. You can alternatively request for immediate reports for each call by passing immediate=True to the profile decorator. There's also a @timecall decorator for printing the time to sys.stderr every time a function is called, when you just want to get a rough measure instead of a detailed (but costly) profile. Caveats A thread on python-dev convinced me that hotshot produces bogus numbers. See http://mail.python.org/pipermail/python-dev/2005-November/058264.html I don't know what will happen if a decorated function will try to call another decorated function. All decorators probably need to explicitly support nested profiling (currently TraceFuncCoverage is the only one that supports this, while HotShotFuncProfile has support for recursive functions.) Profiling with hotshot creates temporary files (*.prof for profiling, *.cprof for coverage) in the current directory. These files are not cleaned up. Exception: when you specify a filename to the profile decorator (to store the pstats.Stats object for later inspection), the temporary file will be the filename you specified with '.raw' appended at the end. 
Coverage analysis with hotshot seems to miss some executions resulting in lower line counts and some lines errorneously marked as never executed. For this reason coverage analysis now uses trace.py which is slower, but more accurate. Copyright (c) 2004--2008 Marius Gedminas <[email protected]> Copyright (c) 2007 Hanno Schlichting Copyright (c) 2008 Florian Schulze Released under the MIT licence since December 2006: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. (Previously it was distributed under the GNU General Public Licence.) """ # $Id: profilehooks.py 29 2010-08-13 16:29:20Z mg $ __author__ = "Marius Gedminas ([email protected])" __copyright__ = "Copyright 2004-2009 Marius Gedminas" __license__ = "MIT" __version__ = "1.4" __date__ = "2009-03-31" import atexit import inspect import sys import re # For profiling from profile import Profile import pstats # For hotshot profiling (inaccurate!) 
try: import hotshot import hotshot.stats except ImportError: hotshot = None # For trace.py coverage import trace # For hotshot coverage (inaccurate!; uses undocumented APIs; might break) if hotshot is not None: import _hotshot import hotshot.log # For cProfile profiling (best) try: import cProfile except ImportError: cProfile = None # For timecall import time # registry of available profilers AVAILABLE_PROFILERS = {} def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False, sort=None, entries=40, profiler=('cProfile', 'profile', 'hotshot')): """Mark `fn` for profiling. If `skip` is > 0, first `skip` calls to `fn` will not be profiled. If `immediate` is False, profiling results will be printed to sys.stdout on program termination. Otherwise results will be printed after each call. If `dirs` is False only the name of the file will be printed. Otherwise the full path is used. `sort` can be a list of sort keys (defaulting to ['cumulative', 'time', 'calls']). The following ones are recognized:: 'calls' -- call count 'cumulative' -- cumulative time 'file' -- file name 'line' -- line number 'module' -- file name 'name' -- function name 'nfl' -- name/file/line 'pcalls' -- call count 'stdname' -- standard name 'time' -- internal time `entries` limits the output to the first N entries. `profiler` can be used to select the preferred profiler, or specify a sequence of them, in order of preference. The default is ('cProfile'. 'profile', 'hotshot'). If `filename` is specified, the profile stats will be stored in the named file. You can load them pstats.Stats(filename). Usage:: def fn(...): ... fn = profile(fn, skip=1) If you are using Python 2.4, you should be able to use the decorator syntax:: @profile(skip=3) def fn(...): ... or just :: @profile def fn(...): ... 
""" if fn is None: # @profile() syntax -- we are a decorator maker def decorator(fn): return profile(fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries, profiler=profiler) return decorator # @profile syntax -- we are a decorator. if isinstance(profiler, str): profiler = [profiler] for p in profiler: if p in AVAILABLE_PROFILERS: profiler_class = AVAILABLE_PROFILERS[p] break else: raise ValueError('only these profilers are available: %s' % ', '.join(AVAILABLE_PROFILERS)) fp = profiler_class(fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries) # fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...) # or HotShotFuncProfile # We cannot return fp or fp.__call__ directly as that would break method # definitions, instead we need to return a plain function. def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn def coverage(fn): """Mark `fn` for line coverage analysis. Results will be printed to sys.stdout on program termination. Usage:: def fn(...): ... fn = coverage(fn) If you are using Python 2.4, you should be able to use the decorator syntax:: @coverage def fn(...): ... """ fp = TraceFuncCoverage(fn) # or HotShotFuncCoverage # We cannot return fp or fp.__call__ directly as that would break method # definitions, instead we need to return a plain function. def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn def coverage_with_hotshot(fn): """Mark `fn` for line coverage analysis. Uses the 'hotshot' module for fast coverage analysis. BUG: Produces inaccurate results. See the docstring of `coverage` for usage examples. 
""" fp = HotShotFuncCoverage(fn) # We cannot return fp or fp.__call__ directly as that would break method # definitions, instead we need to return a plain function. def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn class FuncProfile(object): """Profiler for a function (uses profile).""" # This flag is shared between all instances in_profiler = False Profile = Profile def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False, sort=None, entries=40): """Creates a profiler for a function. Every profiler has its own log file (the name of which is derived from the function name). FuncProfile registers an atexit handler that prints profiling information to sys.stderr when the program terminates. """ self.fn = fn self.skip = skip self.filename = filename self.immediate = immediate self.dirs = dirs self.sort = sort or ('cumulative', 'time', 'calls') if isinstance(self.sort, str): self.sort = (self.sort, ) self.entries = entries self.reset_stats() atexit.register(self.atexit) def __call__(self, *args, **kw): """Profile a singe call to the function.""" self.ncalls += 1 if self.skip > 0: self.skip -= 1 self.skipped += 1 return self.fn(*args, **kw) if FuncProfile.in_profiler: # handle recursive calls return self.fn(*args, **kw) # You cannot reuse the same profiler for many calls and accumulate # stats that way. 
:-/ profiler = self.Profile() try: FuncProfile.in_profiler = True return profiler.runcall(self.fn, *args, **kw) finally: FuncProfile.in_profiler = False self.stats.add(profiler) if self.immediate: self.print_stats() self.reset_stats() def print_stats(self): """Print profile information to sys.stdout.""" funcname = self.fn.__name__ filename = self.fn.func_code.co_filename lineno = self.fn.func_code.co_firstlineno print print "*** PROFILER RESULTS ***" print "%s (%s:%s)" % (funcname, filename, lineno) print "function called %d times" % self.ncalls, if self.skipped: print "(%d calls not profiled)" % self.skipped else: print print stats = self.stats if self.filename: stats.dump_stats(self.filename) if not self.dirs: stats.strip_dirs() stats.sort_stats(*self.sort) stats.print_stats(self.entries) def reset_stats(self): """Reset accumulated profiler statistics.""" # Note: not using self.Profile, since pstats.Stats() fails then self.stats = pstats.Stats(Profile()) self.ncalls = 0 self.skipped = 0 def atexit(self): """Stop profiling and print profile information to sys.stdout. This function is registered as an atexit hook. """ if not self.immediate: self.print_stats() AVAILABLE_PROFILERS['profile'] = FuncProfile if cProfile is not None: class CProfileFuncProfile(FuncProfile): """Profiler for a function (uses cProfile).""" Profile = cProfile.Profile AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile if hotshot is not None: class HotShotFuncProfile(object): """Profiler for a function (uses hotshot).""" # This flag is shared between all instances in_profiler = False def __init__(self, fn, skip=0, filename=None): """Creates a profiler for a function. Every profiler has its own log file (the name of which is derived from the function name). HotShotFuncProfile registers an atexit handler that prints profiling information to sys.stderr when the program terminates. The log file is not removed and remains there to clutter the current working directory. 
""" self.fn = fn self.filename = filename if self.filename: self.logfilename = filename + ".raw" else: self.logfilename = fn.__name__ + ".prof" self.profiler = hotshot.Profile(self.logfilename) self.ncalls = 0 self.skip = skip self.skipped = 0 atexit.register(self.atexit) def __call__(self, *args, **kw): """Profile a singe call to the function.""" self.ncalls += 1 if self.skip > 0: self.skip -= 1 self.skipped += 1 return self.fn(*args, **kw) if HotShotFuncProfile.in_profiler: # handle recursive calls return self.fn(*args, **kw) try: HotShotFuncProfile.in_profiler = True return self.profiler.runcall(self.fn, *args, **kw) finally: HotShotFuncProfile.in_profiler = False def atexit(self): """Stop profiling and print profile information to sys.stderr. This function is registered as an atexit hook. """ self.profiler.close() funcname = self.fn.__name__ filename = self.fn.func_code.co_filename lineno = self.fn.func_code.co_firstlineno print print "*** PROFILER RESULTS ***" print "%s (%s:%s)" % (funcname, filename, lineno) print "function called %d times" % self.ncalls, if self.skipped: print "(%d calls not profiled)" % self.skipped else: print print stats = hotshot.stats.load(self.logfilename) # hotshot.stats.load takes ages, and the .prof file eats megabytes, but # a saved stats object is small and fast if self.filename: stats.dump_stats(self.filename) # it is best to save before strip_dirs stats.strip_dirs() stats.sort_stats('cumulative', 'time', 'calls') stats.print_stats(40) AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile class HotShotFuncCoverage: """Coverage analysis for a function (uses _hotshot). HotShot coverage is reportedly faster than trace.py, but it appears to have problems with exceptions; also line counts in coverage reports are generally lower from line counts produced by TraceFuncCoverage. Is this my bug, or is it a problem with _hotshot? """ def __init__(self, fn): """Creates a profiler for a function. 
Every profiler has its own log file (the name of which is derived from the function name). HotShotFuncCoverage registers an atexit handler that prints profiling information to sys.stderr when the program terminates. The log file is not removed and remains there to clutter the current working directory. """ self.fn = fn self.logfilename = fn.__name__ + ".cprof" self.profiler = _hotshot.coverage(self.logfilename) self.ncalls = 0 atexit.register(self.atexit) def __call__(self, *args, **kw): """Profile a singe call to the function.""" self.ncalls += 1 return self.profiler.runcall(self.fn, args, kw) def atexit(self): """Stop profiling and print profile information to sys.stderr. This function is registered as an atexit hook. """ self.profiler.close() funcname = self.fn.__name__ filename = self.fn.func_code.co_filename lineno = self.fn.func_code.co_firstlineno print print "*** COVERAGE RESULTS ***" print "%s (%s:%s)" % (funcname, filename, lineno) print "function called %d times" % self.ncalls print fs = FuncSource(self.fn) reader = hotshot.log.LogReader(self.logfilename) for what, (filename, lineno, funcname), tdelta in reader: if filename != fs.filename: continue if what == hotshot.log.LINE: fs.mark(lineno) if what == hotshot.log.ENTER: # hotshot gives us the line number of the function definition # and never gives us a LINE event for the first statement in # a function, so if we didn't perform this mapping, the first # statement would be marked as never executed if lineno == fs.firstlineno: lineno = fs.firstcodelineno fs.mark(lineno) reader.close() print fs class TraceFuncCoverage: """Coverage analysis for a function (uses trace module). HotShot coverage analysis is reportedly faster, but it appears to have problems with exceptions. 
""" # Shared between all instances so that nested calls work tracer = trace.Trace(count=True, trace=False, ignoredirs=[sys.prefix, sys.exec_prefix]) # This flag is also shared between all instances tracing = False def __init__(self, fn): """Creates a profiler for a function. Every profiler has its own log file (the name of which is derived from the function name). TraceFuncCoverage registers an atexit handler that prints profiling information to sys.stderr when the program terminates. The log file is not removed and remains there to clutter the current working directory. """ self.fn = fn self.logfilename = fn.__name__ + ".cprof" self.ncalls = 0 atexit.register(self.atexit) def __call__(self, *args, **kw): """Profile a singe call to the function.""" self.ncalls += 1 if TraceFuncCoverage.tracing: return self.fn(*args, **kw) try: TraceFuncCoverage.tracing = True return self.tracer.runfunc(self.fn, *args, **kw) finally: TraceFuncCoverage.tracing = False def atexit(self): """Stop profiling and print profile information to sys.stderr. This function is registered as an atexit hook. """ funcname = self.fn.__name__ filename = self.fn.func_code.co_filename lineno = self.fn.func_code.co_firstlineno print print "*** COVERAGE RESULTS ***" print "%s (%s:%s)" % (funcname, filename, lineno) print "function called %d times" % self.ncalls print fs = FuncSource(self.fn) for (filename, lineno), count in self.tracer.counts.items(): if filename != fs.filename: continue fs.mark(lineno, count) print fs never_executed = fs.count_never_executed() if never_executed: print "%d lines were not executed." 
% never_executed class FuncSource: """Source code annotator for a function.""" blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$") def __init__(self, fn): self.fn = fn self.filename = inspect.getsourcefile(fn) self.source, self.firstlineno = inspect.getsourcelines(fn) self.sourcelines = {} self.firstcodelineno = self.firstlineno self.find_source_lines() def find_source_lines(self): """Mark all executable source lines in fn as executed 0 times.""" strs = trace.find_strings(self.filename) lines = trace.find_lines_from_code(self.fn.func_code, strs) self.firstcodelineno = sys.maxint for lineno in lines: self.firstcodelineno = min(self.firstcodelineno, lineno) self.sourcelines.setdefault(lineno, 0) if self.firstcodelineno == sys.maxint: self.firstcodelineno = self.firstlineno def mark(self, lineno, count=1): """Mark a given source line as executed count times. Multiple calls to mark for the same lineno add up. """ self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count def count_never_executed(self): """Count statements that were never executed.""" lineno = self.firstlineno counter = 0 for line in self.source: if self.sourcelines.get(lineno) == 0: if not self.blank_rx.match(line): counter += 1 lineno += 1 return counter def __str__(self): """Return annotated source code for the function.""" lines = [] lineno = self.firstlineno for line in self.source: counter = self.sourcelines.get(lineno) if counter is None: prefix = ' ' * 7 elif counter == 0: if self.blank_rx.match(line): prefix = ' ' * 7 else: prefix = '>' * 6 + ' ' else: prefix = '%5d: ' % counter lines.append(prefix + line) lineno += 1 return ''.join(lines) def timecall(fn=None, immediate=True, timer=time.time): """Wrap `fn` and print its execution time. Example:: @timecall def somefunc(x, y): time.sleep(x * y) somefunc(2, 3) will print the time taken by somefunc on every call. 
If you want just a summary at program termination, use @timecall(immediate=False) You can also choose a timing method other than the default ``time.time()``, e.g.: @timecall(timer=time.clock) """ if fn is None: # @timecall() syntax -- we are a decorator maker def decorator(fn): return timecall(fn, immediate=immediate, timer=timer) return decorator # @timecall syntax -- we are a decorator. fp = FuncTimer(fn, immediate=immediate, timer=timer) # We cannot return fp or fp.__call__ directly as that would break method # definitions, instead we need to return a plain function. def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn class FuncTimer(object): def __init__(self, fn, immediate, timer): self.fn = fn self.ncalls = 0 self.totaltime = 0 self.immediate = immediate self.timer = timer if not immediate: atexit.register(self.atexit) def __call__(self, *args, **kw): """Profile a singe call to the function.""" fn = self.fn timer = self.timer self.ncalls += 1 try: start = timer() return fn(*args, **kw) finally: duration = timer() - start self.totaltime += duration if self.immediate: funcname = fn.__name__ filename = fn.func_code.co_filename lineno = fn.func_code.co_firstlineno print >> sys.stderr, "\n %s (%s:%s):\n %.3f seconds\n" % ( funcname, filename, lineno, duration) def atexit(self): if not self.ncalls: return funcname = self.fn.__name__ filename = self.fn.func_code.co_filename lineno = self.fn.func_code.co_firstlineno print ("\n %s (%s:%s):\n" " %d calls, %.3f seconds (%.3f seconds per call)\n" % ( funcname, filename, lineno, self.ncalls, self.totaltime, self.totaltime / self.ncalls))
24,752
Python
.py
592
32.663851
83
0.614139
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,385
MultipartPostHandler.py
midgetspy_Sick-Beard/lib/MultipartPostHandler.py
#!/usr/bin/python #### # 06/2010 Nic Wolfe <[email protected]> # 02/2006 Will Holcomb <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # import urllib import urllib2 import mimetools, mimetypes import os, sys # Controls how sequences are uncoded. If true, elements may be given multiple values by # assigning a sequence. doseq = 1 class MultipartPostHandler(urllib2.BaseHandler): handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first def http_request(self, request): data = request.get_data() if data is not None and type(data) != str: v_files = [] v_vars = [] try: for(key, value) in data.items(): if type(value) in (file, list, tuple): v_files.append((key, value)) else: v_vars.append((key, value)) except TypeError: systype, value, traceback = sys.exc_info() raise TypeError, "not a valid non-string sequence or mapping object", traceback if len(v_files) == 0: data = urllib.urlencode(v_vars, doseq) else: boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files) contenttype = 'multipart/form-data; boundary=%s' % boundary if(request.has_header('Content-Type') and request.get_header('Content-Type').find('multipart/form-data') != 0): print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data') request.add_unredirected_header('Content-Type', contenttype) request.add_data(data) return request @staticmethod def multipart_encode(vars, files, boundary = None, buffer = None): if boundary is None: boundary = mimetools.choose_boundary() if buffer is 
None: buffer = '' for(key, value) in vars: buffer += '--%s\r\n' % boundary buffer += 'Content-Disposition: form-data; name="%s"' % key buffer += '\r\n\r\n' + value + '\r\n' for(key, fd) in files: # allow them to pass in a file or a tuple with name & data if type(fd) == file: name_in = fd.name fd.seek(0) data_in = fd.read() elif type(fd) in (tuple, list): name_in, data_in = fd filename = os.path.basename(name_in) contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' buffer += '--%s\r\n' % boundary buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename) buffer += 'Content-Type: %s\r\n' % contenttype # buffer += 'Content-Length: %s\r\n' % file_size buffer += '\r\n' + data_in + '\r\n' buffer += '--%s--\r\n\r\n' % boundary return boundary, buffer https_request = http_request
3,646
Python
.py
77
35.883117
111
0.581177
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,386
configobj.py
midgetspy_Sick-Beard/lib/configobj.py
# configobj.py # A config file reader/writer that supports nested sections in config files. # Copyright (C) 2005-2009 Michael Foord, Nicola Larosa # E-mail: fuzzyman AT voidspace DOT org DOT uk # nico AT tekNico DOT net # ConfigObj 4 # http://www.voidspace.org.uk/python/configobj.html # Released subject to the BSD License # Please see http://www.voidspace.org.uk/python/license.shtml # Scripts maintained at http://www.voidspace.org.uk/python/index.shtml # For information about bugfixes, updates and support, please join the # ConfigObj mailing list: # http://lists.sourceforge.net/lists/listinfo/configobj-develop # Comments, suggestions and bug reports welcome. from __future__ import generators import sys import os import re compiler = None try: import compiler except ImportError: # for IronPython pass try: from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE except ImportError: # Python 2.2 does not have these # UTF-8 BOM_UTF8 = '\xef\xbb\xbf' # UTF-16, little endian BOM_UTF16_LE = '\xff\xfe' # UTF-16, big endian BOM_UTF16_BE = '\xfe\xff' if sys.byteorder == 'little': # UTF-16, native endianness BOM_UTF16 = BOM_UTF16_LE else: # UTF-16, native endianness BOM_UTF16 = BOM_UTF16_BE # A dictionary mapping BOM to # the encoding to decode with, and what to set the # encoding attribute to. BOMS = { BOM_UTF8: ('utf_8', None), BOM_UTF16_BE: ('utf16_be', 'utf_16'), BOM_UTF16_LE: ('utf16_le', 'utf_16'), BOM_UTF16: ('utf_16', 'utf_16'), } # All legal variants of the BOM codecs. # TODO: the list of aliases is not meant to be exhaustive, is there a # better way ? BOM_LIST = { 'utf_16': 'utf_16', 'u16': 'utf_16', 'utf16': 'utf_16', 'utf-16': 'utf_16', 'utf16_be': 'utf16_be', 'utf_16_be': 'utf16_be', 'utf-16be': 'utf16_be', 'utf16_le': 'utf16_le', 'utf_16_le': 'utf16_le', 'utf-16le': 'utf16_le', 'utf_8': 'utf_8', 'u8': 'utf_8', 'utf': 'utf_8', 'utf8': 'utf_8', 'utf-8': 'utf_8', } # Map of encodings to the BOM to write. 
BOM_SET = { 'utf_8': BOM_UTF8, 'utf_16': BOM_UTF16, 'utf16_be': BOM_UTF16_BE, 'utf16_le': BOM_UTF16_LE, None: BOM_UTF8 } def match_utf8(encoding): return BOM_LIST.get(encoding.lower()) == 'utf_8' # Quote strings used for writing values squot = "'%s'" dquot = '"%s"' noquot = "%s" wspace_plus = ' \r\n\v\t\'"' tsquot = '"""%s"""' tdquot = "'''%s'''" try: enumerate except NameError: def enumerate(obj): """enumerate for Python 2.2.""" i = -1 for item in obj: i += 1 yield i, item # Sentinel for use in getattr calls to replace hasattr MISSING = object() __version__ = '4.6.0' __revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $' __docformat__ = "restructuredtext en" __all__ = ( '__version__', 'DEFAULT_INDENT_TYPE', 'DEFAULT_INTERPOLATION', 'ConfigObjError', 'NestingError', 'ParseError', 'DuplicateError', 'ConfigspecError', 'ConfigObj', 'SimpleVal', 'InterpolationError', 'InterpolationLoopError', 'MissingInterpolationOption', 'RepeatSectionError', 'ReloadError', 'UnreprError', 'UnknownType', '__docformat__', 'flatten_errors', ) DEFAULT_INTERPOLATION = 'configparser' DEFAULT_INDENT_TYPE = ' ' MAX_INTERPOL_DEPTH = 10 OPTION_DEFAULTS = { 'interpolation': True, 'raise_errors': False, 'list_values': True, 'create_empty': False, 'file_error': False, 'configspec': None, 'stringify': True, # option may be set to one of ('', ' ', '\t') 'indent_type': None, 'encoding': None, 'default_encoding': None, 'unrepr': False, 'write_empty_values': False, } def getObj(s): s = "a=" + s if compiler is None: raise ImportError('compiler module not available') p = compiler.parse(s) return p.getChildren()[1].getChildren()[0].getChildren()[1] class UnknownType(Exception): pass class Builder(object): def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise UnknownType(o.__class__.__name__) return m(o) def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = 
iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): if o.name == 'None': return None if o.name == 'True': return True if o.name == 'False': return False # An undefined Name raise UnknownType('Undefined Name') def build_Add(self, o): real, imag = map(self.build_Const, o.getChildren()) try: real = float(real) except TypeError: raise UnknownType('Add') if not isinstance(imag, complex) or imag.real != 0.0: raise UnknownType('Add') return real + imag def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_UnarySub(self, o): return - self.build_Const(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build_Const(o.getChildren()[0]) _builder = Builder() def unrepr(s): if not s: return s return _builder.build(getObj(s)) class ConfigObjError(SyntaxError): """ This is the base class for all errors that ConfigObj raises. It is a subclass of SyntaxError. """ def __init__(self, message='', line_number=None, line=''): self.line = line self.line_number = line_number SyntaxError.__init__(self, message) class NestingError(ConfigObjError): """ This error indicates a level of nesting that doesn't match. """ class ParseError(ConfigObjError): """ This error indicates that a line is badly written. It is neither a valid ``key = value`` line, nor a valid section marker line. """ class ReloadError(IOError): """ A 'reload' operation failed. This exception is a subclass of ``IOError``. """ def __init__(self): IOError.__init__(self, 'reload failed, filename is not set.') class DuplicateError(ConfigObjError): """ The keyword or section specified already exists. """ class ConfigspecError(ConfigObjError): """ An error occured whilst parsing a configspec. 
""" class InterpolationError(ConfigObjError): """Base class for the two interpolation errors.""" class InterpolationLoopError(InterpolationError): """Maximum interpolation depth exceeded in string interpolation.""" def __init__(self, option): InterpolationError.__init__( self, 'interpolation loop detected in value "%s".' % option) class RepeatSectionError(ConfigObjError): """ This error indicates additional sections in a section with a ``__many__`` (repeated) section. """ class MissingInterpolationOption(InterpolationError): """A value specified for interpolation was missing.""" def __init__(self, option): InterpolationError.__init__( self, 'missing option "%s" in interpolation.' % option) class UnreprError(ConfigObjError): """An error parsing in unrepr mode.""" class InterpolationEngine(object): """ A helper class to help perform string interpolation. This class is an abstract base class; its descendants perform the actual work. """ # compiled regexp to use in self.interpolate() _KEYCRE = re.compile(r"%\(([^)]*)\)s") def __init__(self, section): # the Section instance that "owns" this engine self.section = section def interpolate(self, key, value): def recursive_interpolate(key, value, section, backtrail): """The function that does the actual work. ``value``: the string we're trying to interpolate. ``section``: the section in which that string was found ``backtrail``: a dict to keep track of where we've been, to detect and prevent infinite recursion loops This is similar to a depth-first-search algorithm. """ # Have we been here already? 
if backtrail.has_key((key, section.name)): # Yes - infinite loop detected raise InterpolationLoopError(key) # Place a marker on our backtrail so we won't come back here again backtrail[(key, section.name)] = 1 # Now start the actual work match = self._KEYCRE.search(value) while match: # The actual parsing of the match is implementation-dependent, # so delegate to our helper function k, v, s = self._parse_match(match) if k is None: # That's the signal that no further interpolation is needed replacement = v else: # Further interpolation may be needed to obtain final value replacement = recursive_interpolate(k, v, s, backtrail) # Replace the matched string with its final value start, end = match.span() value = ''.join((value[:start], replacement, value[end:])) new_search_start = start + len(replacement) # Pick up the next interpolation key, if any, for next time # through the while loop match = self._KEYCRE.search(value, new_search_start) # Now safe to come back here again; remove marker from backtrail del backtrail[(key, section.name)] return value # Back in interpolate(), all we have to do is kick off the recursive # function with appropriate starting values value = recursive_interpolate(key, value, self.section, {}) return value def _fetch(self, key): """Helper function to fetch values from owning section. Returns a 2-tuple: the value, and the section where it was found. """ # switch off interpolation before we try and fetch anything ! 
save_interp = self.section.main.interpolation self.section.main.interpolation = False # Start at section that "owns" this InterpolationEngine current_section = self.section while True: # try the current section first val = current_section.get(key) if val is not None: break # try "DEFAULT" next val = current_section.get('DEFAULT', {}).get(key) if val is not None: break # move up to parent and try again # top-level's parent is itself if current_section.parent is current_section: # reached top level, time to give up break current_section = current_section.parent # restore interpolation to previous value before returning self.section.main.interpolation = save_interp if val is None: raise MissingInterpolationOption(key) return val, current_section def _parse_match(self, match): """Implementation-dependent helper function. Will be passed a match object corresponding to the interpolation key we just found (e.g., "%(foo)s" or "$foo"). Should look up that key in the appropriate config file section (using the ``_fetch()`` helper function) and return a 3-tuple: (key, value, section) ``key`` is the name of the key we're looking for ``value`` is the value found for that key ``section`` is a reference to the section where it was found ``key`` and ``section`` should be None if no further interpolation should be performed on the resulting value (e.g., if we interpolated "$$" and returned "$"). 
""" raise NotImplementedError() class ConfigParserInterpolation(InterpolationEngine): """Behaves like ConfigParser.""" _KEYCRE = re.compile(r"%\(([^)]*)\)s") def _parse_match(self, match): key = match.group(1) value, section = self._fetch(key) return key, value, section class TemplateInterpolation(InterpolationEngine): """Behaves like string.Template.""" _delimiter = '$' _KEYCRE = re.compile(r""" \$(?: (?P<escaped>\$) | # Two $ signs (?P<named>[_a-z][_a-z0-9]*) | # $name format {(?P<braced>[^}]*)} # ${name} format ) """, re.IGNORECASE | re.VERBOSE) def _parse_match(self, match): # Valid name (in or out of braces): fetch value from section key = match.group('named') or match.group('braced') if key is not None: value, section = self._fetch(key) return key, value, section # Escaped delimiter (e.g., $$): return single delimiter if match.group('escaped') is not None: # Return None for key and section to indicate it's time to stop return None, self._delimiter, None # Anything else: ignore completely, just return it unchanged return None, match.group(), None interpolation_engines = { 'configparser': ConfigParserInterpolation, 'template': TemplateInterpolation, } def __newobj__(cls, *args): # Hack for pickle return cls.__new__(cls, *args) class Section(dict): """ A dictionary-like object that represents a section in a config file. It does string interpolation if the 'interpolation' attribute of the 'main' object is set to True. Interpolation is tried first from this object, then from the 'DEFAULT' section of this object, next from the parent and its 'DEFAULT' section, and so on until the main object is reached. A Section will behave like an ordered dictionary - following the order of the ``scalars`` and ``sections`` attributes. You can use this to change the order of members. Iteration follows the order: scalars, then sections. 
""" def __setstate__(self, state): dict.update(self, state[0]) self.__dict__.update(state[1]) def __reduce__(self): state = (dict(self), self.__dict__) return (__newobj__, (self.__class__,), state) def __init__(self, parent, depth, main, indict=None, name=None): """ * parent is the section above * depth is the depth level of this section * main is the main ConfigObj * indict is a dictionary to initialise the section with """ if indict is None: indict = {} dict.__init__(self) # used for nesting level *and* interpolation self.parent = parent # used for the interpolation attribute self.main = main # level of nesting depth of this Section self.depth = depth # purely for information self.name = name # self._initialise() # we do this explicitly so that __setitem__ is used properly # (rather than just passing to ``dict.__init__``) for entry, value in indict.iteritems(): self[entry] = value def _initialise(self): # the sequence of scalar values in this Section self.scalars = [] # the sequence of sections in this Section self.sections = [] # for comments :-) self.comments = {} self.inline_comments = {} # the configspec self.configspec = None # for defaults self.defaults = [] self.default_values = {} def _interpolate(self, key, value): try: # do we already have an interpolation engine? engine = self._interpolation_engine except AttributeError: # not yet: first time running _interpolate(), so pick the engine name = self.main.interpolation if name == True: # note that "if name:" would be incorrect here # backwards-compatibility: interpolation=True means use default name = DEFAULT_INTERPOLATION name = name.lower() # so that "Template", "template", etc. 
all work class_ = interpolation_engines.get(name, None) if class_ is None: # invalid value for self.main.interpolation self.main.interpolation = False return value else: # save reference to engine so we don't have to do this again engine = self._interpolation_engine = class_(self) # let the engine do the actual work return engine.interpolate(key, value) def __getitem__(self, key): """Fetch the item and do string interpolation.""" val = dict.__getitem__(self, key) if self.main.interpolation and isinstance(val, basestring): return self._interpolate(key, val) return val def __setitem__(self, key, value, unrepr=False): """ Correctly set a value. Making dictionary values Section instances. (We have to special case 'Section' instances - which are also dicts) Keys must be strings. Values need only be strings (or lists of strings) if ``main.stringify`` is set. ``unrepr`` must be set when setting a value to a dictionary, without creating a new sub-section. """ if not isinstance(key, basestring): raise ValueError('The key "%s" is not a string.' % key) # add the comment if not self.comments.has_key(key): self.comments[key] = [] self.inline_comments[key] = '' # remove the entry from defaults if key in self.defaults: self.defaults.remove(key) # if isinstance(value, Section): if not self.has_key(key): self.sections.append(key) dict.__setitem__(self, key, value) elif isinstance(value, dict) and not unrepr: # First create the new depth level, # then create the section if not self.has_key(key): self.sections.append(key) new_depth = self.depth + 1 dict.__setitem__( self, key, Section( self, new_depth, self.main, indict=value, name=key)) else: if not self.has_key(key): self.scalars.append(key) if not self.main.stringify: if isinstance(value, basestring): pass elif isinstance(value, (list, tuple)): for entry in value: if not isinstance(entry, basestring): raise TypeError('Value is not a string "%s".' % entry) else: raise TypeError('Value is not a string "%s".' 
% value) dict.__setitem__(self, key, value) def __delitem__(self, key): """Remove items from the sequence when deleting.""" dict. __delitem__(self, key) if key in self.scalars: self.scalars.remove(key) else: self.sections.remove(key) del self.comments[key] del self.inline_comments[key] def get(self, key, default=None): """A version of ``get`` that doesn't bypass string interpolation.""" try: return self[key] except KeyError: return default def update(self, indict): """ A version of update that uses our ``__setitem__``. """ for entry in indict: self[entry] = indict[entry] def pop(self, key, *args): """ 'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised' """ val = dict.pop(self, key, *args) if key in self.scalars: del self.comments[key] del self.inline_comments[key] self.scalars.remove(key) elif key in self.sections: del self.comments[key] del self.inline_comments[key] self.sections.remove(key) if self.main.interpolation and isinstance(val, basestring): return self._interpolate(key, val) return val def popitem(self): """Pops the first (key,val)""" sequence = (self.scalars + self.sections) if not sequence: raise KeyError(": 'popitem(): dictionary is empty'") key = sequence[0] val = self[key] del self[key] return key, val def clear(self): """ A version of clear that also affects scalars/sections Also clears comments and configspec. 
        Leaves other attributes alone :
            depth/main/parent are not affected
        """
        dict.clear(self)
        self.scalars = []
        self.sections = []
        self.comments = {}
        self.inline_comments = {}
        self.configspec = None


    def setdefault(self, key, default=None):
        """A version of setdefault that sets sequence if appropriate."""
        # EAFP: try the lookup first; only on KeyError insert the default
        # through our own __setitem__ (which also updates scalars/sections
        # and the comment maps).
        try:
            return self[key]
        except KeyError:
            self[key] = default
            # re-fetch through __getitem__ so string interpolation applies
            return self[key]


    def items(self):
        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
        # keys come out in definition order: scalars first, then sections
        return zip((self.scalars + self.sections), self.values())


    def keys(self):
        """D.keys() -> list of D's keys"""
        return (self.scalars + self.sections)


    def values(self):
        """D.values() -> list of D's values"""
        # index through self[key] (not dict.__getitem__) so interpolation
        # is applied to each value
        return [self[key] for key in (self.scalars + self.sections)]


    def iteritems(self):
        """D.iteritems() -> an iterator over the (key, value) items of D"""
        return iter(self.items())


    def iterkeys(self):
        """D.iterkeys() -> an iterator over the keys of D"""
        return iter((self.scalars + self.sections))

    # iterating a Section iterates its keys, like a normal dict
    __iter__ = iterkeys


    def itervalues(self):
        """D.itervalues() -> an iterator over the values of D"""
        return iter(self.values())


    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
            for key in (self.scalars + self.sections)])

    __str__ = __repr__
    __str__.__doc__ = "x.__str__() <==> str(x)"


    # Extra methods - not in a normal dictionary

    def dict(self):
        """
        Return a deepcopy of self as a dictionary.

        All members that are ``Section`` instances are recursively turned to
        ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict() >>> n == a 1 >>> n is a 0 """ newdict = {} for entry in self: this_entry = self[entry] if isinstance(this_entry, Section): this_entry = this_entry.dict() elif isinstance(this_entry, list): # create a copy rather than a reference this_entry = list(this_entry) elif isinstance(this_entry, tuple): # create a copy rather than a reference this_entry = tuple(this_entry) newdict[entry] = this_entry return newdict def merge(self, indict): """ A recursive update - useful for merging config files. >>> a = '''[section1] ... option1 = True ... [[subsection]] ... more_options = False ... # end of file'''.splitlines() >>> b = '''# File is user.ini ... [section1] ... option1 = False ... # end of file'''.splitlines() >>> c1 = ConfigObj(b) >>> c2 = ConfigObj(a) >>> c2.merge(c1) >>> c2 ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}) """ for key, val in indict.items(): if (key in self and isinstance(self[key], dict) and isinstance(val, dict)): self[key].merge(val) else: self[key] = val def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments. """ if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) # val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment def walk(self, function, raise_errors=True, call_on_sections=False, **keywargs): """ Walk every member and call a function on the keyword and value. 
Return a dictionary of the return values If the function raises an exception, raise the errror unless ``raise_errors=False``, in which case set the return value to ``False``. Any unrecognised keyword arguments you pass to walk, will be pased on to the function you pass in. Note: if ``call_on_sections`` is ``True`` then - on encountering a subsection, *first* the function is called for the *whole* subsection, and then recurses into it's members. This means your function must be able to handle strings, dictionaries and lists. This allows you to change the key of subsections as well as for ordinary members. The return value when called on the whole subsection has to be discarded. See the encode and decode methods for examples, including functions. .. admonition:: caution You can use ``walk`` to transform the names of members of a section but you mustn't add or delete members. >>> config = '''[XXXXsection] ... XXXXkey = XXXXvalue'''.splitlines() >>> cfg = ConfigObj(config) >>> cfg ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}}) >>> def transform(section, key): ... val = section[key] ... newkey = key.replace('XXXX', 'CLIENT1') ... section.rename(key, newkey) ... if isinstance(val, (tuple, list, dict)): ... pass ... else: ... val = val.replace('XXXX', 'CLIENT1') ... 
section[newkey] = val >>> cfg.walk(transform, call_on_sections=True) {'CLIENT1section': {'CLIENT1key': None}} >>> cfg ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}) """ out = {} # scalars first for i in range(len(self.scalars)): entry = self.scalars[i] try: val = function(self, entry, **keywargs) # bound again in case name has changed entry = self.scalars[i] out[entry] = val except Exception: if raise_errors: raise else: entry = self.scalars[i] out[entry] = False # then sections for i in range(len(self.sections)): entry = self.sections[i] if call_on_sections: try: function(self, entry, **keywargs) except Exception: if raise_errors: raise else: entry = self.sections[i] out[entry] = False # bound again in case name has changed entry = self.sections[i] # previous result is discarded out[entry] = self[entry].walk( function, raise_errors=raise_errors, call_on_sections=call_on_sections, **keywargs) return out def as_bool(self, key): """ Accepts a key as input. The corresponding value must be a string or the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to retain compatibility with Python 2.2. If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns ``True``. If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns ``False``. ``as_bool`` is not case sensitive. Any other input will raise a ``ValueError``. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_bool('a') Traceback (most recent call last): ValueError: Value "fish" is neither True nor False >>> a['b'] = 'True' >>> a.as_bool('b') 1 >>> a['b'] = 'off' >>> a.as_bool('b') 0 """ val = self[key] if val == True: return True elif val == False: return False else: try: if not isinstance(val, basestring): # TODO: Why do we raise a KeyError here? 
                    raise KeyError()
                else:
                    # lookup is case-insensitive via the shared _bools map
                    # ('true'/'yes'/'on'/'1' and their negatives)
                    return self.main._bools[val.lower()]
            except KeyError:
                # non-string values and unrecognised strings both end up here
                raise ValueError('Value "%s" is neither True nor False' % val)


    def as_int(self, key):
        """
        A convenience method which coerces the specified value to an integer.

        If the value is an invalid literal for ``int``, a ``ValueError`` will
        be raised.

        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_int('a')
        Traceback (most recent call last):
        ValueError: invalid literal for int() with base 10: 'fish'
        >>> a['b'] = '1'
        >>> a.as_int('b')
        1
        >>> a['b'] = '3.2'
        >>> a.as_int('b')
        Traceback (most recent call last):
        ValueError: invalid literal for int() with base 10: '3.2'
        """
        return int(self[key])


    def as_float(self, key):
        """
        A convenience method which coerces the specified value to a float.

        If the value is an invalid literal for ``float``, a ``ValueError``
        will be raised.

        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_float('a')
        Traceback (most recent call last):
        ValueError: invalid literal for float(): fish
        >>> a['b'] = '1'
        >>> a.as_float('b')
        1.0
        >>> a['b'] = '3.2'
        >>> a.as_float('b')
        3.2000000000000002
        """
        return float(self[key])


    def as_list(self, key):
        """
        A convenience method which fetches the specified value, guaranteeing
        that it is a list.

        >>> a = ConfigObj()
        >>> a['a'] = 1
        >>> a.as_list('a')
        [1]
        >>> a['a'] = (1,)
        >>> a.as_list('a')
        [1]
        >>> a['a'] = [1]
        >>> a.as_list('a')
        [1]
        """
        result = self[key]
        if isinstance(result, (tuple, list)):
            # return a copy, not the stored sequence itself
            return list(result)
        # scalar value: wrap it in a single-element list
        return [result]


    def restore_default(self, key):
        """
        Restore (and return) default value for the specified key.

        This method will only work for a ConfigObj that was created
        with a configspec and has been validated.

        If there is no default value for this key, ``KeyError`` is raised.
        """
        default = self.default_values[key]
        # bypass our own __setitem__, which would remove the key from
        # self.defaults again
        dict.__setitem__(self, key, default)
        if key not in self.defaults:
            self.defaults.append(key)
        return default


    def restore_defaults(self):
        """
        Recursively restore default values to all members
        that have them.
This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values. """ for key in self.default_values: self.restore_default(key) for section in self.sections: self[section].restore_defaults() class ConfigObj(Section): """An object to read, create, and write config files.""" _keyword = re.compile(r'''^ # line start (\s*) # indentation ( # keyword (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"=].*?) # no quotes ) \s*=\s* # divider (.*) # value (including list values and comments) $ # line end ''', re.VERBOSE) _sectionmarker = re.compile(r'''^ (\s*) # 1: indentation ((?:\[\s*)+) # 2: section marker open ( # 3: section name open (?:"\s*\S.*?\s*")| # at least one non-space with double quotes (?:'\s*\S.*?\s*')| # at least one non-space with single quotes (?:[^'"\s].*?) # at least one non-space unquoted ) # section name close ((?:\s*\])+) # 4: section marker close \s*(\#.*)? # 5: optional comment $''', re.VERBOSE) # this regexp pulls list values out as a single string # or single values and comments # FIXME: this regex adds a '' to the end of comma terminated lists # workaround in ``_handle_value`` _valueexp = re.compile(r'''^ (?: (?: ( (?: (?: (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#][^,\#]*?) # unquoted ) \s*,\s* # comma )* # match all list items ending in a comma (if any) ) ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#\s][^,]*?)| # unquoted (?:(?<!,)) # Empty value )? # last item in a list - or string value )| (,) # alternatively a single comma - empty list ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # use findall to get the members of a list value _listvalueexp = re.compile(r''' ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'",\#].*?) 
# unquoted ) \s*,\s* # comma ''', re.VERBOSE) # this regexp is used for the value # when lists are switched off _nolistvalue = re.compile(r'''^ ( (?:".*?")| # double quotes (?:'.*?')| # single quotes (?:[^'"\#].*?)| # unquoted (?:) # Empty value ) \s*(\#.*)? # optional comment $''', re.VERBOSE) # regexes for finding triple quoted values on one line _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$") _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$') _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$") _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$') _triple_quote = { "'''": (_single_line_single, _multi_line_single), '"""': (_single_line_double, _multi_line_double), } # Used by the ``istrue`` Section method _bools = { 'yes': True, 'no': False, 'on': True, 'off': False, '1': True, '0': False, 'true': True, 'false': False, } def __init__(self, infile=None, options=None, _inspec=False, **kwargs): """ Parse a config file or create a config file object. ``ConfigObj(infile=None, options=None, **kwargs)`` """ self._inspec = _inspec # init the superclass Section.__init__(self, self, 0, self) infile = infile or [] options = dict(options or {}) # keyword arguments take precedence over an options dictionary options.update(kwargs) if _inspec: options['list_values'] = False defaults = OPTION_DEFAULTS.copy() # TODO: check the values too. for entry in options: if entry not in defaults: raise TypeError('Unrecognised option "%s".' % entry) # Add any explicit options to the defaults defaults.update(options) self._initialise(defaults) configspec = defaults['configspec'] self._original_configspec = configspec self._load(infile, configspec) def _load(self, infile, configspec): if isinstance(infile, basestring): self.filename = infile if os.path.isfile(infile): h = open(infile, 'rb') infile = h.read() or [] h.close() elif self.file_error: # raise an error if the file doesn't exist raise IOError('Config file not found: "%s".' 
% self.filename) else: # file doesn't already exist if self.create_empty: # this is a good test that the filename specified # isn't impossible - like on a non-existent device h = open(infile, 'w') h.write('') h.close() infile = [] elif isinstance(infile, (list, tuple)): infile = list(infile) elif isinstance(infile, dict): # initialise self # the Section class handles creating subsections if isinstance(infile, ConfigObj): # get a copy of our ConfigObj infile = infile.dict() for entry in infile: self[entry] = infile[entry] del self._errors if configspec is not None: self._handle_configspec(configspec) else: self.configspec = None return elif getattr(infile, 'read', MISSING) is not MISSING: # This supports file like objects infile = infile.read() or [] # needs splitting into lines - but needs doing *after* decoding # in case it's not an 8 bit encoding else: raise TypeError('infile must be a filename, file like object, or list of lines.') if infile: # don't do it for the empty ConfigObj infile = self._handle_bom(infile) # infile is now *always* a list # # Set the newlines attribute (first line ending it finds) # and strip trailing '\n' or '\r' from lines for line in infile: if (not line) or (line[-1] not in ('\r', '\n', '\r\n')): continue for end in ('\r\n', '\n', '\r'): if line.endswith(end): self.newlines = end break break infile = [line.rstrip('\r\n') for line in infile] self._parse(infile) # if we had any errors, now is the time to raise them if self._errors: info = "at line %s." 
% self._errors[0].line_number if len(self._errors) > 1: msg = "Parsing failed with several errors.\nFirst error %s" % info error = ConfigObjError(msg) else: error = self._errors[0] # set the errors attribute; it's a list of tuples: # (error_type, message, line_number) error.errors = self._errors # set the config attribute error.config = self raise error # delete private attributes del self._errors if configspec is None: self.configspec = None else: self._handle_configspec(configspec) def _initialise(self, options=None): if options is None: options = OPTION_DEFAULTS # initialise a few variables self.filename = None self._errors = [] self.raise_errors = options['raise_errors'] self.interpolation = options['interpolation'] self.list_values = options['list_values'] self.create_empty = options['create_empty'] self.file_error = options['file_error'] self.stringify = options['stringify'] self.indent_type = options['indent_type'] self.encoding = options['encoding'] self.default_encoding = options['default_encoding'] self.BOM = False self.newlines = None self.write_empty_values = options['write_empty_values'] self.unrepr = options['unrepr'] self.initial_comment = [] self.final_comment = [] self.configspec = None if self._inspec: self.list_values = False # Clear section attributes as well Section._initialise(self) def __repr__(self): return ('ConfigObj({%s})' % ', '.join([('%s: %s' % (repr(key), repr(self[key]))) for key in (self.scalars + self.sections)])) def _handle_bom(self, infile): """ Handle any BOM, and decode if necessary. If an encoding is specified, that *must* be used - but the BOM should still be removed (and the BOM attribute set). (If the encoding is wrongly specified, then a BOM for an alternative encoding won't be discovered or removed.) If an encoding is not specified, UTF8 or UTF16 BOM will be detected and removed. The BOM attribute will be set. UTF16 will be decoded to unicode. NOTE: This method must not be called with an empty ``infile``. 
Specifying the *wrong* encoding is likely to cause a ``UnicodeDecodeError``. ``infile`` must always be returned as a list of lines, but may be passed in as a single string. """ if ((self.encoding is not None) and (self.encoding.lower() not in BOM_LIST)): # No need to check for a BOM # the encoding specified doesn't have one # just decode return self._decode(infile, self.encoding) if isinstance(infile, (list, tuple)): line = infile[0] else: line = infile if self.encoding is not None: # encoding explicitly supplied # And it could have an associated BOM # TODO: if encoding is just UTF16 - we ought to check for both # TODO: big endian and little endian versions. enc = BOM_LIST[self.encoding.lower()] if enc == 'utf_16': # For UTF16 we try big endian and little endian for BOM, (encoding, final_encoding) in BOMS.items(): if not final_encoding: # skip UTF8 continue if infile.startswith(BOM): ### BOM discovered ##self.BOM = True # Don't need to remove BOM return self._decode(infile, encoding) # If we get this far, will *probably* raise a DecodeError # As it doesn't appear to start with a BOM return self._decode(infile, self.encoding) # Must be UTF8 BOM = BOM_SET[enc] if not line.startswith(BOM): return self._decode(infile, self.encoding) newline = line[len(BOM):] # BOM removed if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline self.BOM = True return self._decode(infile, self.encoding) # No encoding specified - so we need to check for UTF8/UTF16 for BOM, (encoding, final_encoding) in BOMS.items(): if not line.startswith(BOM): continue else: # BOM discovered self.encoding = final_encoding if not final_encoding: self.BOM = True # UTF8 # remove BOM newline = line[len(BOM):] if isinstance(infile, (list, tuple)): infile[0] = newline else: infile = newline # UTF8 - don't decode if isinstance(infile, basestring): return infile.splitlines(True) else: return infile # UTF16 - have to decode return self._decode(infile, encoding) # No BOM discovered and no 
encoding specified, just return if isinstance(infile, basestring): # infile read from a file will be a single string return infile.splitlines(True) return infile def _a_to_u(self, aString): """Decode ASCII strings to unicode if a self.encoding is specified.""" if self.encoding: return aString.decode('ascii') else: return aString def _decode(self, infile, encoding): """ Decode infile to unicode. Using the specified encoding. if is a string, it also needs converting to a list. """ if isinstance(infile, basestring): # can't be unicode # NOTE: Could raise a ``UnicodeDecodeError`` return infile.decode(encoding).splitlines(True) for i, line in enumerate(infile): if not isinstance(line, unicode): # NOTE: The isinstance test here handles mixed lists of unicode/string # NOTE: But the decode will break on any non-string values # NOTE: Or could raise a ``UnicodeDecodeError`` infile[i] = line.decode(encoding) return infile def _decode_element(self, line): """Decode element to unicode if necessary.""" if not self.encoding: return line if isinstance(line, str) and self.default_encoding: return line.decode(self.default_encoding) return line def _str(self, value): """ Used by ``stringify`` within validate, to turn non-string values into strings. """ if not isinstance(value, basestring): return str(value) else: return value def _parse(self, infile): """Actually parse the config file.""" temp_list_values = self.list_values if self.unrepr: self.list_values = False comment_list = [] done_start = False this_section = self maxline = len(infile) - 1 cur_index = -1 reset_comment = False while cur_index < maxline: if reset_comment: comment_list = [] cur_index += 1 line = infile[cur_index] sline = line.strip() # do we have anything on the line ? 
if not sline or sline.startswith('#'): reset_comment = False comment_list.append(line) continue if not done_start: # preserve initial comment self.initial_comment = comment_list comment_list = [] done_start = True reset_comment = True # first we check if it's a section marker mat = self._sectionmarker.match(line) if mat is not None: # is a section line (indent, sect_open, sect_name, sect_close, comment) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent cur_depth = sect_open.count('[') if cur_depth != sect_close.count(']'): self._handle_error("Cannot compute the section depth at line %s.", NestingError, infile, cur_index) continue if cur_depth < this_section.depth: # the new section is dropping back to a previous level try: parent = self._match_depth(this_section, cur_depth).parent except SyntaxError: self._handle_error("Cannot compute nesting level at line %s.", NestingError, infile, cur_index) continue elif cur_depth == this_section.depth: # the new section is a sibling of the current section parent = this_section.parent elif cur_depth == this_section.depth + 1: # the new section is a child the current section parent = this_section else: self._handle_error("Section too nested at line %s.", NestingError, infile, cur_index) sect_name = self._unquote(sect_name) if parent.has_key(sect_name): self._handle_error('Duplicate section name at line %s.', DuplicateError, infile, cur_index) continue # create the new section this_section = Section( parent, cur_depth, self, name=sect_name) parent[sect_name] = this_section parent.inline_comments[sect_name] = comment parent.comments[sect_name] = comment_list continue # # it's not a section marker, # so it should be a valid ``key = value`` line mat = self._keyword.match(line) if mat is None: # it neither matched as a keyword # or a section marker self._handle_error( 'Invalid line at line "%s".', ParseError, infile, cur_index) else: # is a keyword value # value will include any inline comment 
(indent, key, value) = mat.groups() if indent and (self.indent_type is None): self.indent_type = indent # check for a multiline value if value[:3] in ['"""', "'''"]: try: (value, comment, cur_index) = self._multiline( value, infile, cur_index, maxline) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if type(e) == UnknownType: msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: if self.unrepr: comment = '' try: value = unrepr(value) except Exception, e: if isinstance(e, UnknownType): msg = 'Unknown name or type in value at line %s.' else: msg = 'Parse error in value at line %s.' self._handle_error(msg, UnreprError, infile, cur_index) continue else: # extract comment and lists try: (value, comment) = self._handle_value(value) except SyntaxError: self._handle_error( 'Parse error in value at line %s.', ParseError, infile, cur_index) continue # key = self._unquote(key) if this_section.has_key(key): self._handle_error( 'Duplicate keyword name at line %s.', DuplicateError, infile, cur_index) continue # add the key. # we set unrepr because if we have got this far we will never # be creating a new section this_section.__setitem__(key, value, unrepr=True) this_section.inline_comments[key] = comment this_section.comments[key] = comment_list continue # if self.indent_type is None: # no indentation used, set the type accordingly self.indent_type = '' # preserve the final comment if not self and not self.initial_comment: self.initial_comment = comment_list elif not reset_comment: self.final_comment = comment_list self.list_values = temp_list_values def _match_depth(self, sect, depth): """ Given a section and a depth level, walk back through the sections parents to see if the depth level matches a previous section. 
Return a reference to the right section, or raise a SyntaxError. """ while depth < sect.depth: if sect is sect.parent: # we've reached the top level already raise SyntaxError() sect = sect.parent if sect.depth == depth: return sect # shouldn't get here raise SyntaxError() def _handle_error(self, text, ErrorClass, infile, cur_index): """ Handle an error according to the error settings. Either raise the error or store it. The error will have occured at ``cur_index`` """ line = infile[cur_index] cur_index += 1 message = text % cur_index error = ErrorClass(message, cur_index, line) if self.raise_errors: # raise the error - parsing stops here raise error # store the error # reraise when parsing has finished self._errors.append(error) def _unquote(self, value): """Return an unquoted version of a value""" if (value[0] == value[-1]) and (value[0] in ('"', "'")): value = value[1:-1] return value def _quote(self, value, multiline=True): """ Return a safely quoted version of a value. Raise a ConfigObjError if the value cannot be safely quoted. If multiline is ``True`` (default) then use triple quotes if necessary. * Don't quote values that don't need it. * Recursively quote members of a list and return a comma joined list. * Multiline is ``False`` for lists. * Obey list syntax for empty and single member lists. If ``list_values=False`` then the value is only quoted if it contains a ``\\n`` (is multiline) or '#'. If ``write_empty_values`` is set, and the value is an empty string, it won't be quoted. 
""" if multiline and self.write_empty_values and value == '': # Only if multiline is set, so that it is used for values not # keys, and not values that are part of a list return '' if multiline and isinstance(value, (list, tuple)): if not value: return ',' elif len(value) == 1: return self._quote(value[0], multiline=False) + ',' return ', '.join([self._quote(val, multiline=False) for val in value]) if not isinstance(value, basestring): if self.stringify: value = str(value) else: raise TypeError('Value "%s" is not a string.' % value) if not value: return '""' no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value)) hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value) check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote if check_for_single: if not self.list_values: # we don't quote if ``list_values=False`` quot = noquot # for normal values either single or double quotes will do elif '\n' in value: # will only happen if multiline is off - e.g. '\n' in key raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)): quot = noquot else: quot = self._get_single_quote(value) else: # if value has '\n' or "'" *and* '"', it will need triple quotes quot = self._get_triple_quote(value) if quot == noquot and '#' in value and self.list_values: quot = self._get_single_quote(value) return quot % value def _get_single_quote(self, value): if ("'" in value) and ('"' in value): raise ConfigObjError('Value "%s" cannot be safely quoted.' % value) elif '"' in value: quot = squot else: quot = dquot return quot def _get_triple_quote(self, value): if (value.find('"""') != -1) and (value.find("'''") != -1): raise ConfigObjError('Value "%s" cannot be safely quoted.' 
% value) if value.find('"""') == -1: quot = tdquot else: quot = tsquot return quot def _handle_value(self, value): """ Given a value string, unquote, remove comment, handle lists. (including empty and single member lists) """ if self._inspec: # Parsing a configspec so don't handle comments return (value, '') # do we look for lists in values ? if not self.list_values: mat = self._nolistvalue.match(value) if mat is None: raise SyntaxError() # NOTE: we don't unquote here return mat.groups() # mat = self._valueexp.match(value) if mat is None: # the value is badly constructed, probably badly quoted, # or an invalid list raise SyntaxError() (list_values, single, empty_list, comment) = mat.groups() if (list_values == '') and (single is None): # change this if you want to accept empty values raise SyntaxError() # NOTE: note there is no error handling from here if the regex # is wrong: then incorrect values will slip through if empty_list is not None: # the single comma - meaning an empty list return ([], comment) if single is not None: # handle empty values if list_values and not single: # FIXME: the '' is a workaround because our regex now matches # '' at the end of a list if it has a trailing comma single = None else: single = single or '""' single = self._unquote(single) if list_values == '': # not a list value return (single, comment) the_list = self._listvalueexp.findall(list_values) the_list = [self._unquote(val) for val in the_list] if single is not None: the_list += [single] return (the_list, comment) def _multiline(self, value, infile, cur_index, maxline): """Extract the value, where we are in a multiline situation.""" quot = value[:3] newvalue = value[3:] single_line = self._triple_quote[quot][0] multi_line = self._triple_quote[quot][1] mat = single_line.match(value) if mat is not None: retval = list(mat.groups()) retval.append(cur_index) return retval elif newvalue.find(quot) != -1: # somehow the triple quote is missing raise SyntaxError() # while cur_index < 
maxline: cur_index += 1 newvalue += '\n' line = infile[cur_index] if line.find(quot) == -1: newvalue += line else: # end of multiline, process it break else: # we've got to the end of the config, oops... raise SyntaxError() mat = multi_line.match(line) if mat is None: # a badly formed line raise SyntaxError() (value, comment) = mat.groups() return (newvalue + value, comment, cur_index) def _handle_configspec(self, configspec): """Parse the configspec.""" # FIXME: Should we check that the configspec was created with the # correct settings ? (i.e. ``list_values=False``) if not isinstance(configspec, ConfigObj): try: configspec = ConfigObj(configspec, raise_errors=True, file_error=True, _inspec=True) except ConfigObjError, e: # FIXME: Should these errors have a reference # to the already parsed ConfigObj ? raise ConfigspecError('Parsing configspec failed: %s' % e) except IOError, e: raise IOError('Reading configspec failed: %s' % e) self.configspec = configspec def _set_configspec(self, section, copy): """ Called by validate. Handles setting the configspec on subsections including sections to be validated by __many__ """ configspec = section.configspec many = configspec.get('__many__') if isinstance(many, dict): for entry in section.sections: if entry not in configspec: section[entry].configspec = many for entry in configspec.sections: if entry == '__many__': continue if entry not in section: section[entry] = {} if copy: # copy comments section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') # Could be a scalar when we expect a section if isinstance(section[entry], Section): section[entry].configspec = configspec[entry] def _write_line(self, indent_string, entry, this_entry, comment): """Write an individual line, for the write method""" # NOTE: the calls to self._quote here handles non-StringType values. 
if not self.unrepr: val = self._decode_element(self._quote(this_entry)) else: val = repr(this_entry) return '%s%s%s%s%s' % (indent_string, self._decode_element(self._quote(entry, multiline=False)), self._a_to_u(' = '), val, self._decode_element(comment)) def _write_marker(self, indent_string, depth, entry, comment): """Write a section marker line""" return '%s%s%s%s%s' % (indent_string, self._a_to_u('[' * depth), self._quote(self._decode_element(entry), multiline=False), self._a_to_u(']' * depth), self._decode_element(comment)) def _handle_comment(self, comment): """Deal with a comment.""" if not comment: return '' start = self.indent_type if not comment.startswith('#'): start += self._a_to_u(' # ') return (start + comment) # Public methods def write(self, outfile=None, section=None): """ Write the current ConfigObj as a file tekNico: FIXME: use StringIO instead of real files >>> filename = a.filename >>> a.filename = 'test.ini' >>> a.write() >>> a.filename = filename >>> a == ConfigObj('test.ini', raise_errors=True) 1 """ if self.indent_type is None: # this can be true if initialised from a dictionary self.indent_type = DEFAULT_INDENT_TYPE out = [] cs = self._a_to_u('#') csp = self._a_to_u('# ') if section is None: int_val = self.interpolation self.interpolation = False section = self for line in self.initial_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) indent_string = self.indent_type * section.depth for entry in (section.scalars + section.sections): if entry in section.defaults: # don't write out default values continue for comment_line in section.comments[entry]: comment_line = self._decode_element(comment_line.lstrip()) if comment_line and not comment_line.startswith(cs): comment_line = csp + comment_line out.append(indent_string + comment_line) this_entry = section[entry] comment = self._handle_comment(section.inline_comments[entry]) if 
isinstance(this_entry, dict): # a section out.append(self._write_marker( indent_string, this_entry.depth, entry, comment)) out.extend(self.write(section=this_entry)) else: out.append(self._write_line( indent_string, entry, this_entry, comment)) if section is self: for line in self.final_comment: line = self._decode_element(line) stripped_line = line.strip() if stripped_line and not stripped_line.startswith(cs): line = csp + line out.append(line) self.interpolation = int_val if section is not self: return out if (self.filename is None) and (outfile is None): # output a list of lines # might need to encode # NOTE: This will *screw* UTF16, each line will start with the BOM if self.encoding: out = [l.encode(self.encoding) for l in out] if (self.BOM and ((self.encoding is None) or (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))): # Add the UTF8 BOM if not out: out.append('') out[0] = BOM_UTF8 + out[0] return out # Turn the list to a string, joined with correct newlines newline = self.newlines or os.linesep output = self._a_to_u(newline).join(out) if self.encoding: output = output.encode(self.encoding) if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)): # Add the UTF8 BOM output = BOM_UTF8 + output if not output.endswith(newline): output += newline if outfile is not None: outfile.write(output) else: h = open(self.filename, 'wb') h.write(output) h.close() def validate(self, validator, preserve_errors=False, copy=False, section=None): """ Test the ConfigObj against a configspec. It uses the ``validator`` object from *validate.py*. To run ``validate`` on the current ConfigObj, call: :: test = config.validate(validator) (Normally having previously passed in the configspec when the ConfigObj was created - you can dynamically assign a dictionary of checks to the ``configspec`` attribute of a section though). It returns ``True`` if everything passes, or a dictionary of pass/fails (True/False). 
If every member of a subsection passes, it will just have the value ``True``. (It also returns ``False`` if all members fail). In addition, it converts the values from strings to their native types if their checks pass (and ``stringify`` is set). If ``preserve_errors`` is ``True`` (``False`` is default) then instead of a marking a fail with a ``False``, it will preserve the actual exception object. This can contain info about the reason for failure. For example the ``VdtValueTooSmallError`` indicates that the value supplied was too small. If a value (or section) is missing it will still be marked as ``False``. You must have the validate module to use ``preserve_errors=True``. You can then use the ``flatten_errors`` function to turn your nested results dictionary into a flattened list of failures - useful for displaying meaningful error messages. """ if section is None: if self.configspec is None: raise ValueError('No configspec supplied.') if preserve_errors: # We do this once to remove a top level dependency on the validate module # Which makes importing configobj faster from validate import VdtMissingValue self._vdtMissingValue = VdtMissingValue section = self if copy: section.initial_comment = section.configspec.initial_comment section.final_comment = section.configspec.final_comment section.encoding = section.configspec.encoding section.BOM = section.configspec.BOM section.newlines = section.configspec.newlines section.indent_type = section.configspec.indent_type # configspec = section.configspec self._set_configspec(section, copy) def validate_entry(entry, spec, val, missing, ret_true, ret_false): try: check = validator.check(spec, val, missing=missing ) except validator.baseErrorClass, e: if not preserve_errors or isinstance(e, self._vdtMissingValue): out[entry] = False else: # preserve the error out[entry] = e ret_false = False ret_true = False else: try: section.default_values.pop(entry, None) except AttributeError: # For Python 2.2 compatibility try: del 
section.default_values[entry] except KeyError: pass try: section.default_values[entry] = validator.get_default_value(configspec[entry]) except (KeyError, AttributeError): # No default or validator has no 'get_default_value' (e.g. SimpleVal) pass ret_false = False out[entry] = True if self.stringify or missing: # if we are doing type conversion # or the value is a supplied default if not self.stringify: if isinstance(check, (list, tuple)): # preserve lists check = [self._str(item) for item in check] elif missing and check is None: # convert the None from a default to a '' check = '' else: check = self._str(check) if (check != val) or missing: section[entry] = check if not copy and missing and entry not in section.defaults: section.defaults.append(entry) return ret_true, ret_false # out = {} ret_true = True ret_false = True unvalidated = [k for k in section.scalars if k not in configspec] incorrect_sections = [k for k in configspec.sections if k in section.scalars] incorrect_scalars = [k for k in configspec.scalars if k in section.sections] for entry in configspec.scalars: if entry in ('__many__', '___many___'): # reserved names continue if (not entry in section.scalars) or (entry in section.defaults): # missing entries # or entries from defaults missing = True val = None if copy and not entry in section.scalars: # copy comments section.comments[entry] = ( configspec.comments.get(entry, [])) section.inline_comments[entry] = ( configspec.inline_comments.get(entry, '')) # else: missing = False val = section[entry] ret_true, ret_false = validate_entry(entry, configspec[entry], val, missing, ret_true, ret_false) many = None if '__many__' in configspec.scalars: many = configspec['__many__'] elif '___many___' in configspec.scalars: many = configspec['___many___'] if many is not None: for entry in unvalidated: val = section[entry] ret_true, ret_false = validate_entry(entry, many, val, False, ret_true, ret_false) for entry in incorrect_scalars: ret_true = False if not 
preserve_errors: out[entry] = False else: ret_false = False msg = 'Value %r was provided as a section' % entry out[entry] = validator.baseErrorClass(msg) for entry in incorrect_sections: ret_true = False if not preserve_errors: out[entry] = False else: ret_false = False msg = 'Section %r was provided as a single value' % entry out[entry] = validator.baseErrorClass(msg) # Missing sections will have been created as empty ones when the # configspec was read. for entry in section.sections: # FIXME: this means DEFAULT is not copied in copy mode if section is self and entry == 'DEFAULT': continue if section[entry].configspec is None: continue if copy: section.comments[entry] = configspec.comments.get(entry, []) section.inline_comments[entry] = configspec.inline_comments.get(entry, '') check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry]) out[entry] = check if check == False: ret_true = False elif check == True: ret_false = False else: ret_true = False ret_false = False # if ret_true: return True elif ret_false: return False return out def reset(self): """Clear ConfigObj instance and restore to 'freshly created' state.""" self.clear() self._initialise() # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload) # requires an empty dictionary self.configspec = None # Just to be sure ;-) self._original_configspec = None def reload(self): """ Reload a ConfigObj from file. This method raises a ``ReloadError`` if the ConfigObj doesn't have a filename attribute pointing to a file. """ if not isinstance(self.filename, basestring): raise ReloadError() filename = self.filename current_options = {} for entry in OPTION_DEFAULTS: if entry == 'configspec': continue current_options[entry] = getattr(self, entry) configspec = self._original_configspec current_options['configspec'] = configspec self.clear() self._initialise(current_options) self._load(filename, configspec) class SimpleVal(object): """ A simple validator. 
Can be used to check that all members expected are present. To use it, provide a configspec with all your members in (the value given will be ignored). Pass an instance of ``SimpleVal`` to the ``validate`` method of your ``ConfigObj``. ``validate`` will return ``True`` if all members are present, or a dictionary with True/False meaning present/missing. (Whole missing sections will be replaced with ``False``) """ def __init__(self): self.baseErrorClass = ConfigObjError def check(self, check, member, missing=False): """A dummy check method, always returns the value unchanged.""" if missing: raise self.baseErrorClass() return member # Check / processing functions for options def flatten_errors(cfg, res, levels=None, results=None): """ An example function that will turn a nested dictionary of results (as returned by ``ConfigObj.validate``) into a flat list. ``cfg`` is the ConfigObj instance being checked, ``res`` is the results dictionary returned by ``validate``. (This is a recursive function, so you shouldn't use the ``levels`` or ``results`` arguments - they are used by the function.) Returns a list of keys that failed. Each member of the list is a tuple : :: ([list of sections...], key, result) If ``validate`` was called with ``preserve_errors=False`` (the default) then ``result`` will always be ``False``. *list of sections* is a flattened list of sections that the key was found in. If the section was missing (or a section was expected and a scalar provided - or vice-versa) then key will be ``None``. If the value (or section) was missing then ``result`` will be ``False``. If ``validate`` was called with ``preserve_errors=True`` and a value was present, but failed the check, then ``result`` will be the exception object returned. You can use this as a string that describes the failure. For example *The value "3" is of the wrong type*. >>> import validate >>> vtor = validate.Validator() >>> my_ini = ''' ... option1 = True ... [section1] ... option1 = True ... 
[section2] ... another_option = Probably ... [section3] ... another_option = True ... [[section3b]] ... value = 3 ... value2 = a ... value3 = 11 ... ''' >>> my_cfg = ''' ... option1 = boolean() ... option2 = boolean() ... option3 = boolean(default=Bad_value) ... [section1] ... option1 = boolean() ... option2 = boolean() ... option3 = boolean(default=Bad_value) ... [section2] ... another_option = boolean() ... [section3] ... another_option = boolean() ... [[section3b]] ... value = integer ... value2 = integer ... value3 = integer(0, 10) ... [[[section3b-sub]]] ... value = string ... [section4] ... another_option = boolean() ... ''' >>> cs = my_cfg.split('\\n') >>> ini = my_ini.split('\\n') >>> cfg = ConfigObj(ini, configspec=cs) >>> res = cfg.validate(vtor, preserve_errors=True) >>> errors = [] >>> for entry in flatten_errors(cfg, res): ... section_list, key, error = entry ... section_list.insert(0, '[root]') ... if key is not None: ... section_list.append(key) ... else: ... section_list.append('[missing]') ... section_string = ', '.join(section_list) ... errors.append((section_string, ' = ', error)) >>> errors.sort() >>> for entry in errors: ... print entry[0], entry[1], (entry[2] or 0) [root], option2 = 0 [root], option3 = the value "Bad_value" is of the wrong type. [root], section1, option2 = 0 [root], section1, option3 = the value "Bad_value" is of the wrong type. [root], section2, another_option = the value "Probably" is of the wrong type. [root], section3, section3b, section3b-sub, [missing] = 0 [root], section3, section3b, value2 = the value "a" is of the wrong type. [root], section3, section3b, value3 = the value "11" is too big. 
[root], section4, [missing] = 0 """ if levels is None: # first time called levels = [] results = [] if res is True: return results if res is False or isinstance(res, Exception): results.append((levels[:], None, res)) if levels: levels.pop() return results for (key, val) in res.items(): if val == True: continue if isinstance(cfg.get(key), dict): # Go down one level levels.append(key) flatten_errors(cfg[key], val, levels, results) continue results.append((levels[:], key, val)) # # Go up one level if levels: levels.pop() # return results """*A programming language is a medium of expression.* - Paul Graham"""
86,447
Python
.py
2,066
29.533882
114
0.539777
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,387
certgen.py
midgetspy_Sick-Beard/lib/certgen.py
# -*- coding: latin-1 -*- # # Copyright (C) Martin Sjögren and AB Strakt 2001, All rights reserved # Copyright (C) Jean-Paul Calderone 2008, All rights reserved # This file is licenced under the GNU LESSER GENERAL PUBLIC LICENSE Version 2.1 or later (aka LGPL v2.1) # Please see LGPL2.1.txt for more information """ Certificate generation module. """ from OpenSSL import crypto import time TYPE_RSA = crypto.TYPE_RSA TYPE_DSA = crypto.TYPE_DSA serial = int(time.time()) def createKeyPair(type, bits): """ Create a public/private key pair. Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA bits - Number of bits to use in the key Returns: The public/private key pair in a PKey object """ pkey = crypto.PKey() pkey.generate_key(type, bits) return pkey def createCertRequest(pkey, digest="md5", **name): """ Create a certificate request. Arguments: pkey - The key to associate with the request digest - Digestion method to use for signing, default is md5 **name - The name of the subject of the request, possible arguments are: C - Country name ST - State or province name L - Locality name O - Organization name OU - Organizational unit name CN - Common name emailAddress - E-mail address Returns: The certificate request in an X509Req object """ req = crypto.X509Req() subj = req.get_subject() for (key,value) in name.items(): setattr(subj, key, value) req.set_pubkey(pkey) req.sign(pkey, digest) return req def createCertificate(req, (issuerCert, issuerKey), serial, (notBefore, notAfter), digest="md5"): """ Generate a certificate given a certificate request. 
Arguments: req - Certificate reqeust to use issuerCert - The certificate of the issuer issuerKey - The private key of the issuer serial - Serial number for the certificate notBefore - Timestamp (relative to now) when the certificate starts being valid notAfter - Timestamp (relative to now) when the certificate stops being valid digest - Digest method to use for signing, default is md5 Returns: The signed certificate in an X509 object """ cert = crypto.X509() cert.set_serial_number(serial) cert.gmtime_adj_notBefore(notBefore) cert.gmtime_adj_notAfter(notAfter) cert.set_issuer(issuerCert.get_subject()) cert.set_subject(req.get_subject()) cert.set_pubkey(req.get_pubkey()) cert.sign(issuerKey, digest) return cert
2,892
Python
.py
70
32.471429
104
0.621708
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,388
setup.py
midgetspy_Sick-Beard/lib/tvdb_api/setup.py
from setuptools import setup setup( name = 'tvdb_api', version='1.9', author='dbr/Ben', description='Interface to thetvdb.com', url='http://github.com/dbr/tvdb_api/tree/master', license='unlicense', long_description="""\ An easy to use API interface to TheTVDB.com Basic usage is: >>> import tvdb_api >>> t = tvdb_api.Tvdb() >>> ep = t['My Name Is Earl'][1][22] >>> ep <Episode 01x22 - Stole a Badge> >>> ep['episodename'] u'Stole a Badge' """, py_modules = ['tvdb_api', 'tvdb_ui', 'tvdb_exceptions', 'tvdb_cache'], classifiers=[ "Intended Audience :: Developers", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Multimedia", "Topic :: Utilities", "Topic :: Software Development :: Libraries :: Python Modules", ] )
814
Python
.py
30
25.033333
70
0.684211
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,389
tvdb_ui.py
midgetspy_Sick-Beard/lib/tvdb_api/tvdb_ui.py
#!/usr/bin/env python #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) """Contains included user interfaces for Tvdb show selection. A UI is a callback. A class, it's __init__ function takes two arguments: - config, which is the Tvdb config dict, setup in tvdb_api.py - log, which is Tvdb's logger instance (which uses the logging module). You can call log.info() log.warning() etc It must have a method "selectSeries", this is passed a list of dicts, each dict contains the the keys "name" (human readable show name), and "sid" (the shows ID as on thetvdb.com). For example: [{'name': u'Lost', 'sid': u'73739'}, {'name': u'Lost Universe', 'sid': u'73181'}] The "selectSeries" method must return the appropriate dict, or it can raise tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show cannot be found). A simple example callback, which returns a random series: >>> import random >>> from tvdb_ui import BaseUI >>> class RandomUI(BaseUI): ... def selectSeries(self, allSeries): ... import random ... return random.choice(allSeries) Then to use it.. 
>>> from tvdb_api import Tvdb >>> t = Tvdb(custom_ui = RandomUI) >>> random_matching_series = t['Lost'] >>> type(random_matching_series) <class 'tvdb_api.Show'> """ __author__ = "dbr/Ben" __version__ = "1.9" import logging import warnings from tvdb_exceptions import tvdb_userabort def log(): return logging.getLogger(__name__) class BaseUI: """Default non-interactive UI, which auto-selects first results """ def __init__(self, config, log = None): self.config = config if log is not None: warnings.warn("the UI's log parameter is deprecated, instead use\n" "use import logging; logging.getLogger('ui').info('blah')\n" "The self.log attribute will be removed in the next version") self.log = logging.getLogger(__name__) def selectSeries(self, allSeries): return allSeries[0] class ConsoleUI(BaseUI): """Interactively allows the user to select a show from a console based UI """ def _displaySeries(self, allSeries, limit = 6): """Helper function, lists series with corresponding ID """ if limit is not None: toshow = allSeries[:limit] else: toshow = allSeries print "TVDB Search Results:" for i, cshow in enumerate(toshow): i_show = i + 1 # Start at more human readable number 1 (not 0) log().debug('Showing allSeries[%s], series %s)' % (i_show, allSeries[i]['seriesname'])) if i == 0: extra = " (default)" else: extra = "" print "%s -> %s [%s] # http://thetvdb.com/?tab=series&id=%s&lid=%s%s" % ( i_show, cshow['seriesname'].encode("UTF-8", "ignore"), cshow['language'].encode("UTF-8", "ignore"), str(cshow['id']), cshow['lid'], extra ) def selectSeries(self, allSeries): self._displaySeries(allSeries) if len(allSeries) == 1: # Single result, return it! print "Automatically selecting only result" return allSeries[0] if self.config['select_first'] is True: print "Automatically returning first search result" return allSeries[0] while True: # return breaks this loop try: print "Enter choice (first number, return for default, 'all', ? 
for help):" ans = raw_input() except KeyboardInterrupt: raise tvdb_userabort("User aborted (^c keyboard interupt)") except EOFError: raise tvdb_userabort("User aborted (EOF received)") log().debug('Got choice of: %s' % (ans)) try: selected_id = int(ans) - 1 # The human entered 1 as first result, not zero except ValueError: # Input was not number if len(ans.strip()) == 0: # Default option log().debug('Default option, returning first series') return allSeries[0] if ans == "q": log().debug('Got quit command (q)') raise tvdb_userabort("User aborted ('q' quit command)") elif ans == "?": print "## Help" print "# Enter the number that corresponds to the correct show." print "# a - display all results" print "# all - display all results" print "# ? - this help" print "# q - abort tvnamer" print "# Press return with no input to select first result" elif ans.lower() in ["a", "all"]: self._displaySeries(allSeries, limit = None) else: log().debug('Unknown keypress %s' % (ans)) else: log().debug('Trying to return ID: %d' % (selected_id)) try: return allSeries[selected_id] except IndexError: log().debug('Invalid show number entered!') print "Invalid number (%s) selected!" self._displaySeries(allSeries)
5,493
Python
.py
126
33.388889
99
0.57603
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,390
tvdb_exceptions.py
midgetspy_Sick-Beard/lib/tvdb_api/tvdb_exceptions.py
#!/usr/bin/env python #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) """Custom exceptions used or raised by tvdb_api """ __author__ = "dbr/Ben" __version__ = "1.9" __all__ = ["tvdb_error", "tvdb_userabort", "tvdb_shownotfound", "tvdb_seasonnotfound", "tvdb_episodenotfound", "tvdb_attributenotfound"] class tvdb_exception(Exception): """Any exception generated by tvdb_api """ pass class tvdb_error(tvdb_exception): """An error with thetvdb.com (Cannot connect, for example) """ pass class tvdb_userabort(tvdb_exception): """User aborted the interactive selection (via the q command, ^c etc) """ pass class tvdb_shownotfound(tvdb_exception): """Show cannot be found on thetvdb.com (non-existant show) """ pass class tvdb_seasonnotfound(tvdb_exception): """Season cannot be found on thetvdb.com """ pass class tvdb_episodenotfound(tvdb_exception): """Episode cannot be found on thetvdb.com """ pass class tvdb_attributenotfound(tvdb_exception): """Raised if an episode does not have the requested attribute (such as a episode name) """ pass
1,231
Python
.py
42
25.880952
72
0.707379
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,391
tvdb_cache.py
midgetspy_Sick-Beard/lib/tvdb_api/tvdb_cache.py
#!/usr/bin/env python #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) """ urllib2 caching handler Modified from http://code.activestate.com/recipes/491261/ """ from __future__ import with_statement __author__ = "dbr/Ben" __version__ = "1.9" import os import time import errno import httplib import urllib2 import StringIO from hashlib import md5 from threading import RLock cache_lock = RLock() def locked_function(origfunc): """Decorator to execute function under lock""" def wrapped(*args, **kwargs): cache_lock.acquire() try: return origfunc(*args, **kwargs) finally: cache_lock.release() return wrapped def calculate_cache_path(cache_location, url): """Checks if [cache_location]/[hash_of_url].headers and .body exist """ thumb = md5(url).hexdigest() header = os.path.join(cache_location, thumb + ".headers") body = os.path.join(cache_location, thumb + ".body") return header, body def check_cache_time(path, max_age): """Checks if a file has been created/modified in the [last max_age] seconds. 
False means the file is too old (or doesn't exist), True means it is up-to-date and valid""" if not os.path.isfile(path): return False cache_modified_time = os.stat(path).st_mtime time_now = time.time() if cache_modified_time < time_now - max_age: # Cache is old return False else: return True @locked_function def exists_in_cache(cache_location, url, max_age): """Returns if header AND body cache file exist (and are up-to-date)""" hpath, bpath = calculate_cache_path(cache_location, url) if os.path.exists(hpath) and os.path.exists(bpath): return( check_cache_time(hpath, max_age) and check_cache_time(bpath, max_age) ) else: # File does not exist return False @locked_function def store_in_cache(cache_location, url, response): """Tries to store response in cache.""" hpath, bpath = calculate_cache_path(cache_location, url) try: outf = open(hpath, "wb") headers = str(response.info()) outf.write(headers) outf.close() outf = open(bpath, "wb") outf.write(response.read()) outf.close() except IOError: return True else: return False @locked_function def delete_from_cache(cache_location, url): """Deletes a response in cache.""" hpath, bpath = calculate_cache_path(cache_location, url) try: if os.path.exists(hpath): os.remove(hpath) if os.path.exists(bpath): os.remove(bpath) except IOError: return True else: return False class CacheHandler(urllib2.BaseHandler): """Stores responses in a persistant on-disk cache. If a subsequent GET request is made for the same URL, the stored response is returned, saving time, resources and bandwidth """ @locked_function def __init__(self, cache_location, max_age = 21600): """The location of the cache directory""" self.max_age = max_age self.cache_location = cache_location if not os.path.exists(self.cache_location): try: os.mkdir(self.cache_location) except OSError, e: if e.errno == errno.EEXIST and os.path.isdir(self.cache_location): # File exists, and it's a directory, # another process beat us to creating this dir, that's OK. 
pass else: # Our target dir is already a file, or different error, # relay the error! raise def default_open(self, request): """Handles GET requests, if the response is cached it returns it """ if request.get_method() is not "GET": return None # let the next handler try to handle the request if exists_in_cache( self.cache_location, request.get_full_url(), self.max_age ): return CachedResponse( self.cache_location, request.get_full_url(), set_cache_header = True ) else: return None def http_response(self, request, response): """Gets a HTTP response, if it was a GET request and the status code starts with 2 (200 OK etc) it caches it and returns a CachedResponse """ if (request.get_method() == "GET" and str(response.code).startswith("2") ): if 'x-local-cache' not in response.info(): # Response is not cached set_cache_header = store_in_cache( self.cache_location, request.get_full_url(), response ) else: set_cache_header = True return CachedResponse( self.cache_location, request.get_full_url(), set_cache_header = set_cache_header ) else: return response class CachedResponse(StringIO.StringIO): """An urllib2.response-like object for cached responses. To determine if a response is cached or coming directly from the network, check the x-local-cache header rather than the object type. 
""" @locked_function def __init__(self, cache_location, url, set_cache_header=True): self.cache_location = cache_location hpath, bpath = calculate_cache_path(cache_location, url) StringIO.StringIO.__init__(self, file(bpath, "rb").read()) self.url = url self.code = 200 self.msg = "OK" headerbuf = file(hpath, "rb").read() if set_cache_header: headerbuf += "x-local-cache: %s\r\n" % (bpath) self.headers = httplib.HTTPMessage(StringIO.StringIO(headerbuf)) def info(self): """Returns headers """ return self.headers def geturl(self): """Returns original URL """ return self.url @locked_function def recache(self): new_request = urllib2.urlopen(self.url) set_cache_header = store_in_cache( self.cache_location, new_request.url, new_request ) CachedResponse.__init__(self, self.cache_location, self.url, True) @locked_function def delete_cache(self): delete_from_cache( self.cache_location, self.url ) if __name__ == "__main__": def main(): """Quick test/example of CacheHandler""" opener = urllib2.build_opener(CacheHandler("/tmp/")) response = opener.open("http://google.com") print response.headers print "Response:", response.read() response.recache() print response.headers print "After recache:", response.read() # Test usage in threads from threading import Thread class CacheThreadTest(Thread): lastdata = None def run(self): req = opener.open("http://google.com") newdata = req.read() if self.lastdata is None: self.lastdata = newdata assert self.lastdata == newdata, "Data was not consistent, uhoh" req.recache() threads = [CacheThreadTest() for x in range(50)] print "Starting threads" [t.start() for t in threads] print "..done" print "Joining threads" [t.join() for t in threads] print "..done" main()
7,784
Python
.py
221
26.538462
82
0.598724
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,392
tvdb_api.py
midgetspy_Sick-Beard/lib/tvdb_api/tvdb_api.py
#!/usr/bin/env python #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) """Simple-to-use Python interface to The TVDB's API (thetvdb.com) Example usage: >>> from tvdb_api import Tvdb >>> t = Tvdb() >>> t['Lost'][4][11]['episodename'] u'Cabin Fever' """ __author__ = "dbr/Ben" __version__ = "1.9" import os import time import urllib import urllib2 import getpass import StringIO import tempfile import warnings import logging import datetime import zipfile try: import xml.etree.cElementTree as ElementTree except ImportError: import xml.etree.ElementTree as ElementTree try: import gzip except ImportError: gzip = None from tvdb_cache import CacheHandler from tvdb_ui import BaseUI, ConsoleUI from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound) lastTimeout = None def log(): return logging.getLogger("tvdb_api") class ShowContainer(dict): """Simple dict that holds a series of Show instances """ def __init__(self): self._stack = [] self._lastgc = time.time() def __setitem__(self, key, value): self._stack.append(key) #keep only the 100th latest results if time.time() - self._lastgc > 20: for o in self._stack[:-100]: del self[o] self._stack = self._stack[-100:] self._lastgc = time.time() super(ShowContainer, self).__setitem__(key, value) class Show(dict): """Holds a dict of seasons, and show data. 
""" def __init__(self): dict.__init__(self) self.data = {} def __repr__(self): return "<Show %s (containing %s seasons)>" % ( self.data.get(u'seriesname', 'instance'), len(self) ) def __getitem__(self, key): if key in self: # Key is an episode, return it return dict.__getitem__(self, key) if key in self.data: # Non-numeric request is for show-data return dict.__getitem__(self.data, key) # Data wasn't found, raise appropriate error if isinstance(key, int) or key.isdigit(): # Episode number x was not found raise tvdb_seasonnotfound("Could not find season %s" % (repr(key))) else: # If it's not numeric, it must be an attribute name, which # doesn't exist, so attribute error. raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key))) def airedOn(self, date): ret = self.search(str(date), 'firstaired') if len(ret) == 0: raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date) return ret def search(self, term = None, key = None): """ Search all episodes in show. Can search all data, or a specific key (for example, episodename) Always returns an array (can be empty). First index contains the first match, and so on. Each array index is an Episode() instance, so doing search_results[0]['episodename'] will retrieve the episode name of the first match. Search terms are converted to lower case (unicode) strings. 
# Examples These examples assume t is an instance of Tvdb(): >>> t = Tvdb() >>> To search for all episodes of Scrubs with a bit of data containing "my first day": >>> t['Scrubs'].search("my first day") [<Episode 01x01 - My First Day>] >>> Search for "My Name Is Earl" episode named "Faked His Own Death": >>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename') [<Episode 01x04 - Faked His Own Death>] >>> To search Scrubs for all episodes with "mentor" in the episode name: >>> t['scrubs'].search('mentor', key = 'episodename') [<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>] >>> # Using search results >>> results = t['Scrubs'].search("my first") >>> print results[0]['episodename'] My First Day >>> for x in results: print x['episodename'] My First Day My First Step My First Kill >>> """ results = [] for cur_season in self.values(): searchresult = cur_season.search(term = term, key = key) if len(searchresult) != 0: results.extend(searchresult) return results class Season(dict): def __init__(self, show = None): """The show attribute points to the parent show """ self.show = show def __repr__(self): return "<Season instance (containing %s episodes)>" % ( len(self.keys()) ) def __getitem__(self, episode_number): if episode_number not in self: raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number))) else: return dict.__getitem__(self, episode_number) def search(self, term = None, key = None): """Search all episodes in season, returns a list of matching Episode instances. 
>>> t = Tvdb() >>> t['scrubs'][1].search('first day') [<Episode 01x01 - My First Day>] >>> See Show.search documentation for further information on search """ results = [] for ep in self.values(): searchresult = ep.search(term = term, key = key) if searchresult is not None: results.append( searchresult ) return results class Episode(dict): def __init__(self, season = None): """The season attribute points to the parent season """ self.season = season def __repr__(self): seasno = int(self.get(u'seasonnumber', 0)) epno = int(self.get(u'episodenumber', 0)) epname = self.get(u'episodename') if epname is not None: return "<Episode %02dx%02d - %s>" % (seasno, epno, epname) else: return "<Episode %02dx%02d>" % (seasno, epno) def __getitem__(self, key): try: return dict.__getitem__(self, key) except KeyError: raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key))) def search(self, term = None, key = None): """Search episode data for term, if it matches, return the Episode (self). The key parameter can be used to limit the search to a specific element, for example, episodename. This primarily for use use by Show.search and Season.search. See Show.search for further information on search Simple example: >>> e = Episode() >>> e['episodename'] = "An Example" >>> e.search("examp") <Episode 00x00 - An Example> >>> Limiting by key: >>> e.search("examp", key = "episodename") <Episode 00x00 - An Example> >>> """ if term == None: raise TypeError("must supply string to search for (contents)") term = unicode(term).lower() for cur_key, cur_value in self.items(): cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower() if key is not None and cur_key != key: # Do not search this key continue if cur_value.find( unicode(term).lower() ) > -1: return self class Actors(list): """Holds all Actor instances for a show """ pass class Actor(dict): """Represents a single actor. Should contain.. 
id, image, name, role, sortorder """ def __repr__(self): return "<Actor \"%s\">" % (self.get("name")) class Tvdb: """Create easy-to-use interface to name of season/episode name >>> t = Tvdb() >>> t['Scrubs'][1][24]['episodename'] u'My Last Day' """ def __init__(self, interactive = False, select_first = False, debug = False, cache = True, banners = False, actors = False, custom_ui = None, language = None, search_all_languages = False, apikey = None, forceConnect=False, useZip=False, dvdorder=False): """interactive (True/False): When True, uses built-in console UI is used to select the correct show. When False, the first search result is used. select_first (True/False): Automatically selects the first series search result (rather than showing the user a list of more than one series). Is overridden by interactive = False, or specifying a custom_ui debug (True/False) DEPRECATED: Replaced with proper use of logging module. To show debug messages: >>> import logging >>> logging.basicConfig(level = logging.DEBUG) cache (True/False/str/unicode/urllib2 opener): Retrieved XML are persisted to to disc. If true, stores in tvdb_api folder under your systems TEMP_DIR, if set to str/unicode instance it will use this as the cache location. If False, disables caching. Can also be passed an arbitrary Python object, which is used as a urllib2 opener, which should be created by urllib2.build_opener banners (True/False): Retrieves the banners for a show. These are accessed via the _banners key of a Show(), for example: >>> Tvdb(banners=True)['scrubs']['_banners'].keys() ['fanart', 'poster', 'series', 'season'] actors (True/False): Retrieves a list of the actors for a show. 
These are accessed via the _actors key of a Show(), for example: >>> t = Tvdb(actors=True) >>> t['scrubs']['_actors'][0]['name'] u'Zach Braff' custom_ui (tvdb_ui.BaseUI subclass): A callable subclass of tvdb_ui.BaseUI (overrides interactive option) language (2 character language abbreviation): The language of the returned data. Is also the language search uses. Default is "en" (English). For full list, run.. >>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS ['da', 'fi', 'nl', ...] search_all_languages (True/False): By default, Tvdb will only search in the language specified using the language option. When this is True, it will search for the show in and language apikey (str/unicode): Override the default thetvdb.com API key. By default it will use tvdb_api's own key (fine for small scripts), but you can use your own key if desired - this is recommended if you are embedding tvdb_api in a larger application) See http://thetvdb.com/?tab=apiregister to get your own key forceConnect (bool): If true it will always try to connect to theTVDB.com even if we recently timed out. By default it will wait one minute before trying again, and any requests within that one minute window will return an exception immediately. useZip (bool): Download the zip archive where possibale, instead of the xml. This is only used when all episodes are pulled. And only the main language xml is used, the actor and banner xml are lost. 
""" global lastTimeout # if we're given a lastTimeout that is less than 1 min just give up if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1): raise tvdb_error("We recently timed out, so giving up early this time") self.shows = ShowContainer() # Holds all Show classes self.corrections = {} # Holds show-name to show_id mapping self.config = {} if apikey is not None: self.config['apikey'] = apikey else: self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key self.config['debug_enabled'] = debug # show debugging messages self.config['custom_ui'] = custom_ui self.config['interactive'] = interactive # prompt for correct series? self.config['select_first'] = select_first self.config['search_all_languages'] = search_all_languages self.config['useZip'] = useZip self.config['dvdorder'] = dvdorder if cache is True: self.config['cache_enabled'] = True self.config['cache_location'] = self._getTempDir() self.urlopener = urllib2.build_opener( CacheHandler(self.config['cache_location']) ) elif cache is False: self.config['cache_enabled'] = False self.urlopener = urllib2.build_opener() # default opener with no caching elif isinstance(cache, basestring): self.config['cache_enabled'] = True self.config['cache_location'] = cache self.urlopener = urllib2.build_opener( CacheHandler(self.config['cache_location']) ) elif isinstance(cache, urllib2.OpenerDirector): # If passed something from urllib2.build_opener, use that log().debug("Using %r as urlopener" % cache) self.config['cache_enabled'] = True self.urlopener = cache else: raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache))) self.config['banners_enabled'] = banners self.config['actors_enabled'] = actors if self.config['debug_enabled']: warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. 
" "To enable debug messages, use the following code before importing: " "import logging; logging.basicConfig(level=logging.DEBUG)") logging.basicConfig(level=logging.DEBUG) # List of language from http://thetvdb.com/api/0629B785CE550C8D/languages.xml # Hard-coded here as it is realtively static, and saves another HTTP request, as # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml self.config['valid_languages'] = [ "da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr", "ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no" ] # thetvdb.com should be based around numeric language codes, # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16 # requires the language ID, thus this mapping is required (mainly # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations) self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30} if language is None: self.config['language'] = 'en' else: if language not in self.config['valid_languages']: raise ValueError("Invalid language %s, options are: %s" % ( language, self.config['valid_languages'] )) else: self.config['language'] = language # The following url_ configs are based of the # http://thetvdb.com/wiki/index.php/Programmers_API self.config['base_url'] = "http://thetvdb.com" if self.config['search_all_languages']: self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config else: self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config 
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config def _getTempDir(self): """Returns the [system temp dir]/tvdb_api-u501 (or tvdb_api-myuser) """ if hasattr(os, 'getuid'): uid = "u%d" % (os.getuid()) else: # For Windows try: uid = getpass.getuser() except ImportError: return os.path.join(tempfile.gettempdir(), "tvdb_api") return os.path.join(tempfile.gettempdir(), "tvdb_api-%s" % (uid)) def _loadUrl(self, url, recache = False, language=None): global lastTimeout try: log().debug("Retrieving URL %s" % url) resp = self.urlopener.open(url) if 'x-local-cache' in resp.headers: log().debug("URL %s was cached in %s" % ( url, resp.headers['x-local-cache']) ) if recache: log().debug("Attempting to recache %s" % url) resp.recache() except (IOError, urllib2.URLError), errormsg: if not str(errormsg).startswith('HTTP Error'): lastTimeout = datetime.datetime.now() raise tvdb_error("Could not connect to server: %s" % (errormsg)) # handle gzipped content, # http://dbr.lighthouseapp.com/projects/13342/tickets/72-gzipped-data-patch if 'gzip' in resp.headers.get("Content-Encoding", ''): if gzip: stream = StringIO.StringIO(resp.read()) gz = gzip.GzipFile(fileobj=stream) return gz.read() raise tvdb_error("Received gzip data from thetvdb.com, but could not correctly handle it") if 'application/zip' in resp.headers.get("Content-Type", ''): try: # TODO: The zip contains actors.xml and banners.xml, which are currently ignored [GH-20] log().debug("We recived a zip file unpacking now ...") zipdata = StringIO.StringIO() zipdata.write(resp.read()) myzipfile = zipfile.ZipFile(zipdata) return myzipfile.read('%s.xml' % language) except zipfile.BadZipfile: if 'x-local-cache' in 
resp.headers: resp.delete_cache() raise tvdb_error("Bad zip file received from thetvdb.com, could not read it") return resp.read() def _getetsrc(self, url, language=None): """Loads a URL using caching, returns an ElementTree of the source """ src = self._loadUrl(url, language=language) try: # TVDB doesn't sanitize \r (CR) from user input in some fields, # remove it to avoid errors. Change from SickBeard, from will14m return ElementTree.fromstring(src.rstrip("\r")) except SyntaxError: src = self._loadUrl(url, recache=True, language=language) try: return ElementTree.fromstring(src.rstrip("\r")) except SyntaxError, exceptionmsg: errormsg = "There was an error with the XML retrieved from thetvdb.com:\n%s" % ( exceptionmsg ) if self.config['cache_enabled']: errormsg += "\nFirst try emptying the cache folder at..\n%s" % ( self.config['cache_location'] ) errormsg += "\nIf this does not resolve the issue, please try again later. If the error persists, report a bug on" errormsg += "\nhttp://dbr.lighthouseapp.com/projects/13342-tvdb_api/overview\n" raise tvdb_error(errormsg) def _setItem(self, sid, seas, ep, attrib, value): """Creates a new episode, creating Show(), Season() and Episode()s as required. Called by _getShowData to populate show Since the nice-to-use tvdb[1][24]['name] interface makes it impossible to do tvdb[1][24]['name] = "name" and still be capable of checking if an episode exists so we can raise tvdb_shownotfound, we have a slightly less pretty method of setting items.. but since the API is supposed to be read-only, this is the best way to do it! 
The problem is that calling tvdb[1][24]['episodename'] = "name" calls __getitem__ on tvdb[1], there is no way to check if tvdb.__dict__ should have a key "1" before we auto-create it """ if sid not in self.shows: self.shows[sid] = Show() if seas not in self.shows[sid]: self.shows[sid][seas] = Season(show = self.shows[sid]) if ep not in self.shows[sid][seas]: self.shows[sid][seas][ep] = Episode(season = self.shows[sid][seas]) self.shows[sid][seas][ep][attrib] = value def _setShowData(self, sid, key, value): """Sets self.shows[sid] to a new Show instance, or sets the data """ if sid not in self.shows: self.shows[sid] = Show() self.shows[sid].data[key] = value def _cleanData(self, data): """Cleans up strings returned by TheTVDB.com Issues corrected: - Replaces &amp; with & - Trailing whitespace """ data = data.replace(u"&amp;", u"&") data = data.strip() return data def search(self, series): """This searches TheTVDB.com for the series name and returns the result list """ series = urllib.quote(series.encode("utf-8")) log().debug("Searching for show %s" % series) seriesEt = self._getetsrc(self.config['url_getSeries'] % (series)) allSeries = [] for series in seriesEt: result = dict((k.tag.lower(), k.text) for k in series.getchildren()) result['id'] = int(result['id']) result['lid'] = self.config['langabbv_to_id'][result['language']] if 'aliasnames' in result: result['aliasnames'] = result['aliasnames'].split("|") log().debug('Found series %(seriesname)s' % result) allSeries.append(result) return allSeries def _getSeries(self, series): """This searches TheTVDB.com for the series name, If a custom_ui UI is configured, it uses this to select the correct series. If not, and interactive == True, ConsoleUI is used, if not BaseUI is used to select the first result. 
""" allSeries = self.search(series) if len(allSeries) == 0: log().debug('Series result returned zero') raise tvdb_shownotfound("Show-name search returned zero results (cannot find show on TVDB)") if self.config['custom_ui'] is not None: log().debug("Using custom UI %s" % (repr(self.config['custom_ui']))) ui = self.config['custom_ui'](config = self.config) else: if not self.config['interactive']: log().debug('Auto-selecting first search result using BaseUI') ui = BaseUI(config = self.config) else: log().debug('Interactively selecting show using ConsoleUI') ui = ConsoleUI(config = self.config) return ui.selectSeries(allSeries) def _parseBanners(self, sid): """Parses banners XML, from http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml Banners are retrieved using t['show name]['_banners'], for example: >>> t = Tvdb(banners = True) >>> t['scrubs']['_banners'].keys() ['fanart', 'poster', 'series', 'season'] >>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath'] u'http://thetvdb.com/banners/posters/76156-2.jpg' >>> Any key starting with an underscore has been processed (not the raw data from the XML) This interface will be improved in future versions. 
""" log().debug('Getting season banners for %s' % (sid)) bannersEt = self._getetsrc( self.config['url_seriesBanner'] % (sid) ) banners = {} for cur_banner in bannersEt.findall('Banner'): bid = cur_banner.find('id').text btype = cur_banner.find('BannerType') btype2 = cur_banner.find('BannerType2') if btype is None or btype2 is None: continue btype, btype2 = btype.text, btype2.text if not btype in banners: banners[btype] = {} if not btype2 in banners[btype]: banners[btype][btype2] = {} if not bid in banners[btype][btype2]: banners[btype][btype2][bid] = {} for cur_element in cur_banner.getchildren(): tag = cur_element.tag.lower() value = cur_element.text if tag is None or value is None: continue tag, value = tag.lower(), value.lower() banners[btype][btype2][bid][tag] = value for k, v in banners[btype][btype2][bid].items(): if k.endswith("path"): new_key = "_%s" % (k) log().debug("Transforming %s to %s" % (k, new_key)) new_url = self.config['url_artworkPrefix'] % (v) banners[btype][btype2][bid][new_key] = new_url self._setShowData(sid, "_banners", banners) def _parseActors(self, sid): """Parsers actors XML, from http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml Actors are retrieved using t['show name]['_actors'], for example: >>> t = Tvdb(actors = True) >>> actors = t['scrubs']['_actors'] >>> type(actors) <class 'tvdb_api.Actors'> >>> type(actors[0]) <class 'tvdb_api.Actor'> >>> actors[0] <Actor "Zach Braff"> >>> sorted(actors[0].keys()) ['id', 'image', 'name', 'role', 'sortorder'] >>> actors[0]['name'] u'Zach Braff' >>> actors[0]['image'] u'http://thetvdb.com/banners/actors/43640.jpg' Any key starting with an underscore has been processed (not the raw data from the XML) """ log().debug("Getting actors for %s" % (sid)) actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid)) cur_actors = Actors() for curActorItem in actorsEt.findall("Actor"): curActor = Actor() for curInfo in curActorItem: tag = curInfo.tag.lower() value = curInfo.text if value is 
not None: if tag == "image": value = self.config['url_artworkPrefix'] % (value) else: value = self._cleanData(value) curActor[tag] = value cur_actors.append(curActor) self._setShowData(sid, '_actors', cur_actors) def _getShowData(self, sid, language): """Takes a series ID, gets the epInfo URL and parses the TVDB XML file into the shows dict in layout: shows[series_id][season_number][episode_number] """ if self.config['language'] is None: log().debug('Config language is none, using show language') if language is None: raise tvdb_error("config['language'] was None, this should not happen") getShowInLanguage = language else: log().debug( 'Configured language %s override show language of %s' % ( self.config['language'], language ) ) getShowInLanguage = self.config['language'] # Parse show information log().debug('Getting all series data for %s' % (sid)) seriesInfoEt = self._getetsrc( self.config['url_seriesInfo'] % (sid, getShowInLanguage) ) for curInfo in seriesInfoEt.findall("Series")[0]: tag = curInfo.tag.lower() value = curInfo.text if value is not None: if tag in ['banner', 'fanart', 'poster']: value = self.config['url_artworkPrefix'] % (value) else: value = self._cleanData(value) self._setShowData(sid, tag, value) # Parse banners if self.config['banners_enabled']: self._parseBanners(sid) # Parse actors if self.config['actors_enabled']: self._parseActors(sid) # Parse episode data log().debug('Getting all episodes of %s' % (sid)) if self.config['useZip']: url = self.config['url_epInfo_zip'] % (sid, language) else: url = self.config['url_epInfo'] % (sid, language) epsEt = self._getetsrc( url, language=language) for cur_ep in epsEt.findall("Episode"): if self.config['dvdorder']: log().debug('Using DVD ordering.') use_dvd = cur_ep.find('DVD_season').text != None and cur_ep.find('DVD_episodenumber').text != None else: use_dvd = False if use_dvd: elem_seasnum, elem_epno = cur_ep.find('DVD_season'), cur_ep.find('DVD_episodenumber') else: elem_seasnum, elem_epno = 
cur_ep.find('SeasonNumber'), cur_ep.find('EpisodeNumber') if elem_seasnum is None or elem_epno is None: log().warning("An episode has incomplete season/episode number (season: %r, episode: %r)" % ( elem_seasnum, elem_epno)) log().debug( " ".join( "%r is %r" % (child.tag, child.text) for child in cur_ep.getchildren())) # TODO: Should this happen? continue # Skip to next episode # float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data seas_no = int(float(elem_seasnum.text)) ep_no = int(float(elem_epno.text)) for cur_item in cur_ep.getchildren(): tag = cur_item.tag.lower() value = cur_item.text if value is not None: if tag == 'filename': value = self.config['url_artworkPrefix'] % (value) else: value = self._cleanData(value) self._setItem(sid, seas_no, ep_no, tag, value) def _nameToSid(self, name): """Takes show name, returns the correct series ID (if the show has already been grabbed), or grabs all episodes and returns the correct SID. """ if name in self.corrections: log().debug('Correcting %s to %s' % (name, self.corrections[name]) ) sid = self.corrections[name] else: log().debug('Getting show %s' % (name)) selected_series = self._getSeries( name ) sname, sid = selected_series['seriesname'], selected_series['id'] log().debug('Got %(seriesname)s, id %(id)s' % selected_series) self.corrections[name] = sid self._getShowData(selected_series['id'], selected_series['language']) return sid def __getitem__(self, key): """Handles tvdb_instance['seriesname'] calls. The dict index should be the show id """ if isinstance(key, (int, long)): # Item is integer, treat as show id if key not in self.shows: self._getShowData(key, self.config['language']) return self.shows[key] key = key.lower() # make key lower case sid = self._nameToSid(key) log().debug('Got series id %s' % (sid)) return self.shows[sid] def __repr__(self): return str(self.shows) def main(): """Simple example of using tvdb_api - it just grabs an episode name interactively. 
""" import logging logging.basicConfig(level=logging.DEBUG) tvdb_instance = Tvdb(interactive=True, cache=False) print tvdb_instance['Lost']['seriesname'] print tvdb_instance['Lost'][1][4]['episodename'] if __name__ == '__main__': main()
33,364
Python
.py
726
34.914601
130
0.577423
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,393
test_tvdb_api.py
midgetspy_Sick-Beard/lib/tvdb_api/tests/test_tvdb_api.py
#!/usr/bin/env python #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) """Unittests for tvdb_api """ import os import sys import datetime import unittest # Force parent directory onto path sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import tvdb_api import tvdb_ui from tvdb_api import (tvdb_shownotfound, tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound) class test_tvdb_basic(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False) def test_different_case(self): """Checks the auto-correction of show names is working. It should correct the weirdly capitalised 'sCruBs' to 'Scrubs' """ self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady') self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs') def test_spaces(self): """Checks shownames with spaces """ self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl') self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death') def test_numeric(self): """Checks numeric show names """ self.assertEquals(self.t['24'][2][20]['episodename'], 'Day 2: 3:00 A.M.-4:00 A.M.') self.assertEquals(self.t['24']['seriesname'], '24') def test_show_iter(self): """Iterating over a show returns each seasons """ self.assertEquals( len( [season for season in self.t['Life on Mars']] ), 2 ) def test_season_iter(self): """Iterating over a show returns episodes """ self.assertEquals( len( [episode for episode in self.t['Life on Mars'][1]] ), 8 ) def test_get_episode_overview(self): """Checks episode overview is retrieved correctly. 
""" self.assertEquals( self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith( 'When a new copy of Doral, a Cylon who had been previously'), True ) def test_get_parent(self): """Check accessing series from episode instance """ show = self.t['Battlestar Galactica (2003)'] season = show[1] episode = show[1][1] self.assertEquals( season.show, show ) self.assertEquals( episode.season, season ) self.assertEquals( episode.season.show, show ) def test_no_season(self): show = self.t['Katekyo Hitman Reborn'] print tvdb_api print show[1][1] class test_tvdb_errors(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False) def test_seasonnotfound(self): """Checks exception is thrown when season doesn't exist. """ self.assertRaises(tvdb_seasonnotfound, lambda:self.t['CNNNN'][10][1]) def test_shownotfound(self): """Checks exception is thrown when episode doesn't exist. """ self.assertRaises(tvdb_shownotfound, lambda:self.t['the fake show thingy']) def test_episodenotfound(self): """Checks exception is raised for non-existent episode """ self.assertRaises(tvdb_episodenotfound, lambda:self.t['Scrubs'][1][30]) def test_attributenamenotfound(self): """Checks exception is thrown for if an attribute isn't found. 
""" self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN'][1][6]['afakeattributething']) self.assertRaises(tvdb_attributenotfound, lambda:self.t['CNNNN']['afakeattributething']) class test_tvdb_search(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False) def test_search_len(self): """There should be only one result matching """ self.assertEquals(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1) def test_search_checkname(self): """Checks you can get the episode name of a search result """ self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day') self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death') def test_search_multiresults(self): """Checks search can return multiple results """ self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True) def test_search_no_params_error(self): """Checks not supplying search info raises TypeError""" self.assertRaises( TypeError, lambda: self.t['Scrubs'].search() ) def test_search_season(self): """Checks the searching of a single season""" self.assertEquals( len(self.t['Scrubs'][1].search("First")), 3 ) def test_search_show(self): """Checks the searching of an entire show""" self.assertEquals( len(self.t['CNNNN'].search('CNNNN', key='episodename')), 3 ) def test_aired_on(self): """Tests airedOn show method""" sr = self.t['Scrubs'].airedOn(datetime.date(2001, 10, 2)) self.assertEquals(len(sr), 1) self.assertEquals(sr[0]['episodename'], u'My First Day') class test_tvdb_data(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False) def test_episode_data(self): """Check the firstaired value is retrieved """ self.assertEquals( self.t['lost']['firstaired'], '2004-09-22' ) class 
test_tvdb_misc(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False) def test_repr_show(self): """Check repr() of Season """ self.assertEquals( repr(self.t['CNNNN']), "<Show Chaser Non-Stop News Network (CNNNN) (containing 3 seasons)>" ) def test_repr_season(self): """Check repr() of Season """ self.assertEquals( repr(self.t['CNNNN'][1]), "<Season instance (containing 9 episodes)>" ) def test_repr_episode(self): """Check repr() of Episode """ self.assertEquals( repr(self.t['CNNNN'][1][1]), "<Episode 01x01 - Terror Alert>" ) def test_have_all_languages(self): """Check valid_languages is up-to-date (compared to languages.xml) """ et = self.t._getetsrc( "http://thetvdb.com/api/%s/languages.xml" % ( self.t.config['apikey'] ) ) languages = [x.find("abbreviation").text for x in et.findall("Language")] self.assertEquals( sorted(languages), sorted(self.t.config['valid_languages']) ) class test_tvdb_languages(unittest.TestCase): def test_episode_name_french(self): """Check episode data is in French (language="fr") """ t = tvdb_api.Tvdb(cache = True, language = "fr") self.assertEquals( t['scrubs'][1][1]['episodename'], "Mon premier jour" ) self.assertTrue( t['scrubs']['overview'].startswith( u"J.D. 
est un jeune m\xe9decin qui d\xe9bute" ) ) def test_episode_name_spanish(self): """Check episode data is in Spanish (language="es") """ t = tvdb_api.Tvdb(cache = True, language = "es") self.assertEquals( t['scrubs'][1][1]['episodename'], "Mi Primer Dia" ) self.assertTrue( t['scrubs']['overview'].startswith( u'Scrubs es una divertida comedia' ) ) def test_multilanguage_selection(self): """Check selected language is used """ class SelectEnglishUI(tvdb_ui.BaseUI): def selectSeries(self, allSeries): return [x for x in allSeries if x['language'] == "en"][0] class SelectItalianUI(tvdb_ui.BaseUI): def selectSeries(self, allSeries): return [x for x in allSeries if x['language'] == "it"][0] t_en = tvdb_api.Tvdb( cache=True, custom_ui = SelectEnglishUI, language = "en") t_it = tvdb_api.Tvdb( cache=True, custom_ui = SelectItalianUI, language = "it") self.assertEquals( t_en['dexter'][1][2]['episodename'], "Crocodile" ) self.assertEquals( t_it['dexter'][1][2]['episodename'], "Lacrime di coccodrillo" ) class test_tvdb_unicode(unittest.TestCase): def test_search_in_chinese(self): """Check searching for show with language=zh returns Chinese seriesname """ t = tvdb_api.Tvdb(cache = True, language = "zh") show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'] self.assertEquals( type(show), tvdb_api.Show ) self.assertEquals( show['seriesname'], u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i' ) def test_search_in_all_languages(self): """Check search_all_languages returns Chinese show, with language=en """ t = tvdb_api.Tvdb(cache = True, search_all_languages = True, language="en") show = t[u'T\xecnh Ng\u01b0\u1eddi Hi\u1ec7n \u0110\u1ea1i'] self.assertEquals( type(show), tvdb_api.Show ) self.assertEquals( show['seriesname'], u'Virtues Of Harmony II' ) class test_tvdb_banners(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = True) def test_have_banners(self): 
"""Check banners at least one banner is found """ self.assertEquals( len(self.t['scrubs']['_banners']) > 0, True ) def test_banner_url(self): """Checks banner URLs start with http:// """ for banner_type, banner_data in self.t['scrubs']['_banners'].items(): for res, res_data in banner_data.items(): for bid, banner_info in res_data.items(): self.assertEquals( banner_info['_bannerpath'].startswith("http://"), True ) def test_episode_image(self): """Checks episode 'filename' image is fully qualified URL """ self.assertEquals( self.t['scrubs'][1][1]['filename'].startswith("http://"), True ) def test_show_artwork(self): """Checks various image URLs within season data are fully qualified """ for key in ['banner', 'fanart', 'poster']: self.assertEquals( self.t['scrubs'][key].startswith("http://"), True ) class test_tvdb_actors(unittest.TestCase): t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True) def test_actors_is_correct_datatype(self): """Check show/_actors key exists and is correct type""" self.assertTrue( isinstance( self.t['scrubs']['_actors'], tvdb_api.Actors ) ) def test_actors_has_actor(self): """Check show has at least one Actor """ self.assertTrue( isinstance( self.t['scrubs']['_actors'][0], tvdb_api.Actor ) ) def test_actor_has_name(self): """Check first actor has a name""" self.assertEquals( self.t['scrubs']['_actors'][0]['name'], "Zach Braff" ) def test_actor_image_corrected(self): """Check image URL is fully qualified """ for actor in self.t['scrubs']['_actors']: if actor['image'] is not None: # Actor's image can be None, it displays as the placeholder # image on thetvdb.com self.assertTrue( actor['image'].startswith("http://") ) class test_tvdb_doctest(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, banners = False) def test_doctest(self): """Check docstring examples works""" import doctest 
doctest.testmod(tvdb_api) class test_tvdb_custom_caching(unittest.TestCase): def test_true_false_string(self): """Tests setting cache to True/False/string Basic tests, only checking for errors """ tvdb_api.Tvdb(cache = True) tvdb_api.Tvdb(cache = False) tvdb_api.Tvdb(cache = "/tmp") def test_invalid_cache_option(self): """Tests setting cache to invalid value """ try: tvdb_api.Tvdb(cache = 2.3) except ValueError: pass else: self.fail("Expected ValueError from setting cache to float") def test_custom_urlopener(self): class UsedCustomOpener(Exception): pass import urllib2 class TestOpener(urllib2.BaseHandler): def default_open(self, request): print request.get_method() raise UsedCustomOpener("Something") custom_opener = urllib2.build_opener(TestOpener()) t = tvdb_api.Tvdb(cache = custom_opener) try: t['scrubs'] except UsedCustomOpener: pass else: self.fail("Did not use custom opener") class test_tvdb_by_id(unittest.TestCase): t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True) def test_actors_is_correct_datatype(self): """Check show/_actors key exists and is correct type""" self.assertEquals( self.t[76156]['seriesname'], 'Scrubs' ) class test_tvdb_zip(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True) def test_get_series_from_zip(self): """ """ self.assertEquals(self.t['scrubs'][1][4]['episodename'], 'My Old Lady') self.assertEquals(self.t['sCruBs']['seriesname'], 'Scrubs') def test_spaces_from_zip(self): """Checks shownames with spaces """ self.assertEquals(self.t['My Name Is Earl']['seriesname'], 'My Name Is Earl') self.assertEquals(self.t['My Name Is Earl'][1][4]['episodename'], 'Faked His Own Death') class test_tvdb_show_ordering(unittest.TestCase): # Used to store the cached instance of Tvdb() t_dvd = None t_air = None def setUp(self): if self.t_dvd is None: self.t_dvd = 
tvdb_api.Tvdb(cache = True, useZip = True, dvdorder=True) if self.t_air is None: self.t_air = tvdb_api.Tvdb(cache = True, useZip = True) def test_ordering(self): """Test Tvdb.search method """ self.assertEquals(u'The Train Job', self.t_air['Firefly'][1][1]['episodename']) self.assertEquals(u'Serenity', self.t_dvd['Firefly'][1][1]['episodename']) self.assertEquals(u'The Cat & the Claw (Part 1)', self.t_air['Batman The Animated Series'][1][1]['episodename']) self.assertEquals(u'On Leather Wings', self.t_dvd['Batman The Animated Series'][1][1]['episodename']) class test_tvdb_show_search(unittest.TestCase): # Used to store the cached instance of Tvdb() t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, useZip = True) def test_search(self): """Test Tvdb.search method """ results = self.t.search("my name is earl") all_ids = [x['seriesid'] for x in results] self.assertTrue('75397' in all_ids) class test_tvdb_alt_names(unittest.TestCase): t = None def setUp(self): if self.t is None: self.__class__.t = tvdb_api.Tvdb(cache = True, actors = True) def test_1(self): """Tests basic access of series name alias """ results = self.t.search("Don't Trust the B---- in Apartment 23") series = results[0] self.assertTrue( 'Apartment 23' in series['aliasnames'] ) if __name__ == '__main__': runner = unittest.TextTestRunner(verbosity = 2) unittest.main(testRunner = runner)
17,826
Python
.py
473
28.274841
123
0.580177
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,394
gprof2dot.py
midgetspy_Sick-Beard/lib/tvdb_api/tests/gprof2dot.py
#!/usr/bin/env python # # Copyright 2008 Jose Fonseca # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Generate a dot graph from the output of several profilers.""" __author__ = "Jose Fonseca" __version__ = "1.0" import sys import math import os.path import re import textwrap import optparse try: # Debugging helper module import debug except ImportError: pass def percentage(p): return "%.02f%%" % (p*100.0,) def add(a, b): return a + b def equal(a, b): if a == b: return a else: return None def fail(a, b): assert False def ratio(numerator, denominator): numerator = float(numerator) denominator = float(denominator) assert 0.0 <= numerator assert numerator <= denominator try: return numerator/denominator except ZeroDivisionError: # 0/0 is undefined, but 1.0 yields more useful results return 1.0 class UndefinedEvent(Exception): """Raised when attempting to get an event which is undefined.""" def __init__(self, event): Exception.__init__(self) self.event = event def __str__(self): return 'unspecified event %s' % self.event.name class Event(object): """Describe a kind of event, and its basic operations.""" def __init__(self, name, null, aggregator, formatter = str): self.name = name self._null = null self._aggregator = aggregator self._formatter = formatter def __eq__(self, other): return self is other def __hash__(self): return id(self) def null(self): return self._null 
def aggregate(self, val1, val2): """Aggregate two event values.""" assert val1 is not None assert val2 is not None return self._aggregator(val1, val2) def format(self, val): """Format an event value.""" assert val is not None return self._formatter(val) MODULE = Event("Module", None, equal) PROCESS = Event("Process", None, equal) CALLS = Event("Calls", 0, add) SAMPLES = Event("Samples", 0, add) TIME = Event("Time", 0.0, add, lambda x: '(' + str(x) + ')') TIME_RATIO = Event("Time ratio", 0.0, add, lambda x: '(' + percentage(x) + ')') TOTAL_TIME = Event("Total time", 0.0, fail) TOTAL_TIME_RATIO = Event("Total time ratio", 0.0, fail, percentage) CALL_RATIO = Event("Call ratio", 0.0, add, percentage) PRUNE_RATIO = Event("Prune ratio", 0.0, add, percentage) class Object(object): """Base class for all objects in profile which can store events.""" def __init__(self, events=None): if events is None: self.events = {} else: self.events = events def __hash__(self): return id(self) def __eq__(self, other): return self is other def __contains__(self, event): return event in self.events def __getitem__(self, event): try: return self.events[event] except KeyError: raise UndefinedEvent(event) def __setitem__(self, event, value): if value is None: if event in self.events: del self.events[event] else: self.events[event] = value class Call(Object): """A call between functions. There should be at most one call object for every pair of functions. 
""" def __init__(self, callee_id): Object.__init__(self) self.callee_id = callee_id class Function(Object): """A function.""" def __init__(self, id, name): Object.__init__(self) self.id = id self.name = name self.calls = {} self.cycle = None def add_call(self, call): if call.callee_id in self.calls: sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id))) self.calls[call.callee_id] = call # TODO: write utility functions def __repr__(self): return self.name class Cycle(Object): """A cycle made from recursive function calls.""" def __init__(self): Object.__init__(self) # XXX: Do cycles need an id? self.functions = set() def add_function(self, function): assert function not in self.functions self.functions.add(function) # XXX: Aggregate events? if function.cycle is not None: for other in function.cycle.functions: if function not in self.functions: self.add_function(other) function.cycle = self class Profile(Object): """The whole profile.""" def __init__(self): Object.__init__(self) self.functions = {} self.cycles = [] def add_function(self, function): if function.id in self.functions: sys.stderr.write('warning: overwriting function %s (id %s)\n' % (function.name, str(function.id))) self.functions[function.id] = function def add_cycle(self, cycle): self.cycles.append(cycle) def validate(self): """Validate the edges.""" for function in self.functions.itervalues(): for callee_id in function.calls.keys(): assert function.calls[callee_id].callee_id == callee_id if callee_id not in self.functions: sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name)) del function.calls[callee_id] def find_cycles(self): """Find cycles using Tarjan's strongly connected components algorithm.""" # Apply the Tarjan's algorithm successively until all functions are visited visited = set() for function in self.functions.itervalues(): if function not in visited: self._tarjan(function, 0, [], 
{}, {}, visited) cycles = [] for function in self.functions.itervalues(): if function.cycle is not None and function.cycle not in cycles: cycles.append(function.cycle) self.cycles = cycles if 0: for cycle in cycles: sys.stderr.write("Cycle:\n") for member in cycle.functions: sys.stderr.write("\t%s\n" % member.name) def _tarjan(self, function, order, stack, orders, lowlinks, visited): """Tarjan's strongly connected components algorithm. See also: - http://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm """ visited.add(function) orders[function] = order lowlinks[function] = order order += 1 pos = len(stack) stack.append(function) for call in function.calls.itervalues(): callee = self.functions[call.callee_id] # TODO: use a set to optimize lookup if callee not in orders: order = self._tarjan(callee, order, stack, orders, lowlinks, visited) lowlinks[function] = min(lowlinks[function], lowlinks[callee]) elif callee in stack: lowlinks[function] = min(lowlinks[function], orders[callee]) if lowlinks[function] == orders[function]: # Strongly connected component found members = stack[pos:] del stack[pos:] if len(members) > 1: cycle = Cycle() for member in members: cycle.add_function(member) return order def call_ratios(self, event): # Aggregate for incoming calls cycle_totals = {} for cycle in self.cycles: cycle_totals[cycle] = 0.0 function_totals = {} for function in self.functions.itervalues(): function_totals[function] = 0.0 for function in self.functions.itervalues(): for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] function_totals[callee] += call[event] if callee.cycle is not None and callee.cycle is not function.cycle: cycle_totals[callee.cycle] += call[event] # Compute the ratios for function in self.functions.itervalues(): for call in function.calls.itervalues(): assert CALL_RATIO not in call if call.callee_id != function.id: callee = self.functions[call.callee_id] if 
callee.cycle is not None and callee.cycle is not function.cycle: total = cycle_totals[callee.cycle] else: total = function_totals[callee] call[CALL_RATIO] = ratio(call[event], total) def integrate(self, outevent, inevent): """Propagate function time ratio allong the function calls. Must be called after finding the cycles. See also: - http://citeseer.ist.psu.edu/graham82gprof.html """ # Sanity checking assert outevent not in self for function in self.functions.itervalues(): assert outevent not in function assert inevent in function for call in function.calls.itervalues(): assert outevent not in call if call.callee_id != function.id: assert CALL_RATIO in call # Aggregate the input for each cycle for cycle in self.cycles: total = inevent.null() for function in self.functions.itervalues(): total = inevent.aggregate(total, function[inevent]) self[inevent] = total # Integrate along the edges total = inevent.null() for function in self.functions.itervalues(): total = inevent.aggregate(total, function[inevent]) self._integrate_function(function, outevent, inevent) self[outevent] = total def _integrate_function(self, function, outevent, inevent): if function.cycle is not None: return self._integrate_cycle(function.cycle, outevent, inevent) else: if outevent not in function: total = function[inevent] for call in function.calls.itervalues(): if call.callee_id != function.id: total += self._integrate_call(call, outevent, inevent) function[outevent] = total return function[outevent] def _integrate_call(self, call, outevent, inevent): assert outevent not in call assert CALL_RATIO in call callee = self.functions[call.callee_id] subtotal = call[CALL_RATIO]*self._integrate_function(callee, outevent, inevent) call[outevent] = subtotal return subtotal def _integrate_cycle(self, cycle, outevent, inevent): if outevent not in cycle: total = inevent.null() for member in cycle.functions: subtotal = member[inevent] for call in member.calls.itervalues(): callee = 
self.functions[call.callee_id] if callee.cycle is not cycle: subtotal += self._integrate_call(call, outevent, inevent) total += subtotal cycle[outevent] = total callees = {} for function in self.functions.itervalues(): if function.cycle is not cycle: for call in function.calls.itervalues(): callee = self.functions[call.callee_id] if callee.cycle is cycle: try: callees[callee] += call[CALL_RATIO] except KeyError: callees[callee] = call[CALL_RATIO] for callee, call_ratio in callees.iteritems(): ranks = {} call_ratios = {} partials = {} self._rank_cycle_function(cycle, callee, 0, ranks) self._call_ratios_cycle(cycle, callee, ranks, call_ratios, set()) partial = self._integrate_cycle_function(cycle, callee, call_ratio, partials, ranks, call_ratios, outevent, inevent) assert partial == max(partials.values()) assert not total or abs(1.0 - partial/(call_ratio*total)) <= 0.001 return cycle[outevent] def _rank_cycle_function(self, cycle, function, rank, ranks): if function not in ranks or ranks[function] > rank: ranks[function] = rank for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] if callee.cycle is cycle: self._rank_cycle_function(cycle, callee, rank + 1, ranks) def _call_ratios_cycle(self, cycle, function, ranks, call_ratios, visited): if function not in visited: visited.add(function) for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] if callee.cycle is cycle: if ranks[callee] > ranks[function]: call_ratios[callee] = call_ratios.get(callee, 0.0) + call[CALL_RATIO] self._call_ratios_cycle(cycle, callee, ranks, call_ratios, visited) def _integrate_cycle_function(self, cycle, function, partial_ratio, partials, ranks, call_ratios, outevent, inevent): if function not in partials: partial = partial_ratio*function[inevent] for call in function.calls.itervalues(): if call.callee_id != function.id: callee = self.functions[call.callee_id] if 
callee.cycle is not cycle: assert outevent in call partial += partial_ratio*call[outevent] else: if ranks[callee] > ranks[function]: callee_partial = self._integrate_cycle_function(cycle, callee, partial_ratio, partials, ranks, call_ratios, outevent, inevent) call_ratio = ratio(call[CALL_RATIO], call_ratios[callee]) call_partial = call_ratio*callee_partial try: call[outevent] += call_partial except UndefinedEvent: call[outevent] = call_partial partial += call_partial partials[function] = partial try: function[outevent] += partial except UndefinedEvent: function[outevent] = partial return partials[function] def aggregate(self, event): """Aggregate an event for the whole profile.""" total = event.null() for function in self.functions.itervalues(): try: total = event.aggregate(total, function[event]) except UndefinedEvent: return self[event] = total def ratio(self, outevent, inevent): assert outevent not in self assert inevent in self for function in self.functions.itervalues(): assert outevent not in function assert inevent in function function[outevent] = ratio(function[inevent], self[inevent]) for call in function.calls.itervalues(): assert outevent not in call if inevent in call: call[outevent] = ratio(call[inevent], self[inevent]) self[outevent] = 1.0 def prune(self, node_thres, edge_thres): """Prune the profile""" # compute the prune ratios for function in self.functions.itervalues(): try: function[PRUNE_RATIO] = function[TOTAL_TIME_RATIO] except UndefinedEvent: pass for call in function.calls.itervalues(): callee = self.functions[call.callee_id] if TOTAL_TIME_RATIO in call: # handle exact cases first call[PRUNE_RATIO] = call[TOTAL_TIME_RATIO] else: try: # make a safe estimate call[PRUNE_RATIO] = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO]) except UndefinedEvent: pass # prune the nodes for function_id in self.functions.keys(): function = self.functions[function_id] try: if function[PRUNE_RATIO] < node_thres: del self.functions[function_id] except 
UndefinedEvent: pass # prune the egdes for function in self.functions.itervalues(): for callee_id in function.calls.keys(): call = function.calls[callee_id] try: if callee_id not in self.functions or call[PRUNE_RATIO] < edge_thres: del function.calls[callee_id] except UndefinedEvent: pass def dump(self): for function in self.functions.itervalues(): sys.stderr.write('Function %s:\n' % (function.name,)) self._dump_events(function.events) for call in function.calls.itervalues(): callee = self.functions[call.callee_id] sys.stderr.write(' Call %s:\n' % (callee.name,)) self._dump_events(call.events) def _dump_events(self, events): for event, value in events.iteritems(): sys.stderr.write(' %s: %s\n' % (event.name, event.format(value))) class Struct: """Masquerade a dictionary with a structure-like behavior.""" def __init__(self, attrs = None): if attrs is None: attrs = {} self.__dict__['_attrs'] = attrs def __getattr__(self, name): try: return self._attrs[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): self._attrs[name] = value def __str__(self): return str(self._attrs) def __repr__(self): return repr(self._attrs) class ParseError(Exception): """Raised when parsing to signal mismatches.""" def __init__(self, msg, line): self.msg = msg # TODO: store more source line information self.line = line def __str__(self): return '%s: %r' % (self.msg, self.line) class Parser: """Parser interface.""" def __init__(self): pass def parse(self): raise NotImplementedError class LineParser(Parser): """Base class for parsers that read line-based formats.""" def __init__(self, file): Parser.__init__(self) self._file = file self.__line = None self.__eof = False def readline(self): line = self._file.readline() if not line: self.__line = '' self.__eof = True self.__line = line.rstrip('\r\n') def lookahead(self): assert self.__line is not None return self.__line def consume(self): assert self.__line is not None line = self.__line self.readline() return line 
def eof(self): assert self.__line is not None return self.__eof class GprofParser(Parser): """Parser for GNU gprof output. See also: - Chapter "Interpreting gprof's Output" from the GNU gprof manual http://sourceware.org/binutils/docs-2.18/gprof/Call-Graph.html#Call-Graph - File "cg_print.c" from the GNU gprof source code http://sourceware.org/cgi-bin/cvsweb.cgi/~checkout~/src/gprof/cg_print.c?rev=1.12&cvsroot=src """ def __init__(self, fp): Parser.__init__(self) self.fp = fp self.functions = {} self.cycles = {} def readline(self): line = self.fp.readline() if not line: sys.stderr.write('error: unexpected end of file\n') sys.exit(1) line = line.rstrip('\r\n') return line _int_re = re.compile(r'^\d+$') _float_re = re.compile(r'^\d+\.\d+$') def translate(self, mo): """Extract a structure from a match object, while translating the types in the process.""" attrs = {} groupdict = mo.groupdict() for name, value in groupdict.iteritems(): if value is None: value = None elif self._int_re.match(value): value = int(value) elif self._float_re.match(value): value = float(value) attrs[name] = (value) return Struct(attrs) _cg_header_re = re.compile( # original gprof header r'^\s+called/total\s+parents\s*$|' + r'^index\s+%time\s+self\s+descendents\s+called\+self\s+name\s+index\s*$|' + r'^\s+called/total\s+children\s*$|' + # GNU gprof header r'^index\s+%\s+time\s+self\s+children\s+called\s+name\s*$' ) _cg_ignore_re = re.compile( # spontaneous r'^\s+<spontaneous>\s*$|' # internal calls (such as "mcount") r'^.*\((\d+)\)$' ) _cg_primary_re = re.compile( r'^\[(?P<index>\d+)\]' + r'\s+(?P<percentage_time>\d+\.\d+)' + r'\s+(?P<self>\d+\.\d+)' + r'\s+(?P<descendants>\d+\.\d+)' + r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' + r'\s+(?P<name>\S.*?)' + r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' + r'\s\[(\d+)\]$' ) _cg_parent_re = re.compile( r'^\s+(?P<self>\d+\.\d+)?' + r'\s+(?P<descendants>\d+\.\d+)?' + r'\s+(?P<called>\d+)(?:/(?P<called_total>\d+))?' 
+ r'\s+(?P<name>\S.*?)' + r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' + r'\s\[(?P<index>\d+)\]$' ) _cg_child_re = _cg_parent_re _cg_cycle_header_re = re.compile( r'^\[(?P<index>\d+)\]' + r'\s+(?P<percentage_time>\d+\.\d+)' + r'\s+(?P<self>\d+\.\d+)' + r'\s+(?P<descendants>\d+\.\d+)' + r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' + r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' + r'\s\[(\d+)\]$' ) _cg_cycle_member_re = re.compile( r'^\s+(?P<self>\d+\.\d+)?' + r'\s+(?P<descendants>\d+\.\d+)?' + r'\s+(?P<called>\d+)(?:\+(?P<called_self>\d+))?' + r'\s+(?P<name>\S.*?)' + r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' + r'\s\[(?P<index>\d+)\]$' ) _cg_sep_re = re.compile(r'^--+$') def parse_function_entry(self, lines): parents = [] children = [] while True: if not lines: sys.stderr.write('warning: unexpected end of entry\n') line = lines.pop(0) if line.startswith('['): break # read function parent line mo = self._cg_parent_re.match(line) if not mo: if self._cg_ignore_re.match(line): continue sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) else: parent = self.translate(mo) parents.append(parent) # read primary line mo = self._cg_primary_re.match(line) if not mo: sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) return else: function = self.translate(mo) while lines: line = lines.pop(0) # read function subroutine line mo = self._cg_child_re.match(line) if not mo: if self._cg_ignore_re.match(line): continue sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) else: child = self.translate(mo) children.append(child) function.parents = parents function.children = children self.functions[function.index] = function def parse_cycle_entry(self, lines): # read cycle header line line = lines[0] mo = self._cg_cycle_header_re.match(line) if not mo: sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) return cycle = self.translate(mo) # read cycle member lines cycle.functions = [] for line in lines[1:]: mo = 
self._cg_cycle_member_re.match(line) if not mo: sys.stderr.write('warning: unrecognized call graph entry: %r\n' % line) continue call = self.translate(mo) cycle.functions.append(call) self.cycles[cycle.cycle] = cycle def parse_cg_entry(self, lines): if lines[0].startswith("["): self.parse_cycle_entry(lines) else: self.parse_function_entry(lines) def parse_cg(self): """Parse the call graph.""" # skip call graph header while not self._cg_header_re.match(self.readline()): pass line = self.readline() while self._cg_header_re.match(line): line = self.readline() # process call graph entries entry_lines = [] while line != '\014': # form feed if line and not line.isspace(): if self._cg_sep_re.match(line): self.parse_cg_entry(entry_lines) entry_lines = [] else: entry_lines.append(line) line = self.readline() def parse(self): self.parse_cg() self.fp.close() profile = Profile() profile[TIME] = 0.0 cycles = {} for index in self.cycles.iterkeys(): cycles[index] = Cycle() for entry in self.functions.itervalues(): # populate the function function = Function(entry.index, entry.name) function[TIME] = entry.self if entry.called is not None: function[CALLS] = entry.called if entry.called_self is not None: call = Call(entry.index) call[CALLS] = entry.called_self function[CALLS] += entry.called_self # populate the function calls for child in entry.children: call = Call(child.index) assert child.called is not None call[CALLS] = child.called if child.index not in self.functions: # NOTE: functions that were never called but were discovered by gprof's # static call graph analysis dont have a call graph entry so we need # to add them here missing = Function(child.index, child.name) function[TIME] = 0.0 function[CALLS] = 0 profile.add_function(missing) function.add_call(call) profile.add_function(function) if entry.cycle is not None: cycles[entry.cycle].add_function(function) profile[TIME] = profile[TIME] + function[TIME] for cycle in cycles.itervalues(): profile.add_cycle(cycle) # Compute 
derived events profile.validate() profile.ratio(TIME_RATIO, TIME) profile.call_ratios(CALLS) profile.integrate(TOTAL_TIME, TIME) profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME) return profile class OprofileParser(LineParser): """Parser for oprofile callgraph output. See also: - http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph """ _fields_re = { 'samples': r'(?P<samples>\d+)', '%': r'(?P<percentage>\S+)', 'linenr info': r'(?P<source>\(no location information\)|\S+:\d+)', 'image name': r'(?P<image>\S+(?:\s\(tgid:[^)]*\))?)', 'app name': r'(?P<application>\S+)', 'symbol name': r'(?P<symbol>\(no symbols\)|.+?)', } def __init__(self, infile): LineParser.__init__(self, infile) self.entries = {} self.entry_re = None def add_entry(self, callers, function, callees): try: entry = self.entries[function.id] except KeyError: self.entries[function.id] = (callers, function, callees) else: callers_total, function_total, callees_total = entry self.update_subentries_dict(callers_total, callers) function_total.samples += function.samples self.update_subentries_dict(callees_total, callees) def update_subentries_dict(self, totals, partials): for partial in partials.itervalues(): try: total = totals[partial.id] except KeyError: totals[partial.id] = partial else: total.samples += partial.samples def parse(self): # read lookahead self.readline() self.parse_header() while self.lookahead(): self.parse_entry() profile = Profile() reverse_call_samples = {} # populate the profile profile[SAMPLES] = 0 for _callers, _function, _callees in self.entries.itervalues(): function = Function(_function.id, _function.name) function[SAMPLES] = _function.samples profile.add_function(function) profile[SAMPLES] += _function.samples if _function.application: function[PROCESS] = os.path.basename(_function.application) if _function.image: function[MODULE] = os.path.basename(_function.image) total_callee_samples = 0 for _callee in _callees.itervalues(): total_callee_samples += _callee.samples for 
_callee in _callees.itervalues(): if not _callee.self: call = Call(_callee.id) call[SAMPLES] = _callee.samples function.add_call(call) # compute derived data profile.validate() profile.find_cycles() profile.ratio(TIME_RATIO, SAMPLES) profile.call_ratios(SAMPLES) profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO) return profile def parse_header(self): while not self.match_header(): self.consume() line = self.lookahead() fields = re.split(r'\s\s+', line) entry_re = r'^\s*' + r'\s+'.join([self._fields_re[field] for field in fields]) + r'(?P<self>\s+\[self\])?$' self.entry_re = re.compile(entry_re) self.skip_separator() def parse_entry(self): callers = self.parse_subentries() if self.match_primary(): function = self.parse_subentry() if function is not None: callees = self.parse_subentries() self.add_entry(callers, function, callees) self.skip_separator() def parse_subentries(self): subentries = {} while self.match_secondary(): subentry = self.parse_subentry() subentries[subentry.id] = subentry return subentries def parse_subentry(self): entry = Struct() line = self.consume() mo = self.entry_re.match(line) if not mo: raise ParseError('failed to parse', line) fields = mo.groupdict() entry.samples = int(fields.get('samples', 0)) entry.percentage = float(fields.get('percentage', 0.0)) if 'source' in fields and fields['source'] != '(no location information)': source = fields['source'] filename, lineno = source.split(':') entry.filename = filename entry.lineno = int(lineno) else: source = '' entry.filename = None entry.lineno = None entry.image = fields.get('image', '') entry.application = fields.get('application', '') if 'symbol' in fields and fields['symbol'] != '(no symbols)': entry.symbol = fields['symbol'] else: entry.symbol = '' if entry.symbol.startswith('"') and entry.symbol.endswith('"'): entry.symbol = entry.symbol[1:-1] entry.id = ':'.join((entry.application, entry.image, source, entry.symbol)) entry.self = fields.get('self', None) != None if entry.self: entry.id += 
':self' if entry.symbol: entry.name = entry.symbol else: entry.name = entry.image return entry def skip_separator(self): while not self.match_separator(): self.consume() self.consume() def match_header(self): line = self.lookahead() return line.startswith('samples') def match_separator(self): line = self.lookahead() return line == '-'*len(line) def match_primary(self): line = self.lookahead() return not line[:1].isspace() def match_secondary(self): line = self.lookahead() return line[:1].isspace() class SharkParser(LineParser): """Parser for MacOSX Shark output. Author: [email protected] """ def __init__(self, infile): LineParser.__init__(self, infile) self.stack = [] self.entries = {} def add_entry(self, function): try: entry = self.entries[function.id] except KeyError: self.entries[function.id] = (function, { }) else: function_total, callees_total = entry function_total.samples += function.samples def add_callee(self, function, callee): func, callees = self.entries[function.id] try: entry = callees[callee.id] except KeyError: callees[callee.id] = callee else: entry.samples += callee.samples def parse(self): self.readline() self.readline() self.readline() self.readline() match = re.compile(r'(?P<prefix>[|+ ]*)(?P<samples>\d+), (?P<symbol>[^,]+), (?P<image>.*)') while self.lookahead(): line = self.consume() mo = match.match(line) if not mo: raise ParseError('failed to parse', line) fields = mo.groupdict() prefix = len(fields.get('prefix', 0)) / 2 - 1 symbol = str(fields.get('symbol', 0)) image = str(fields.get('image', 0)) entry = Struct() entry.id = ':'.join([symbol, image]) entry.samples = int(fields.get('samples', 0)) entry.name = symbol entry.image = image # adjust the callstack if prefix < len(self.stack): del self.stack[prefix:] if prefix == len(self.stack): self.stack.append(entry) # if the callstack has had an entry, it's this functions caller if prefix > 0: self.add_callee(self.stack[prefix - 1], entry) self.add_entry(entry) profile = Profile() 
profile[SAMPLES] = 0 for _function, _callees in self.entries.itervalues(): function = Function(_function.id, _function.name) function[SAMPLES] = _function.samples profile.add_function(function) profile[SAMPLES] += _function.samples if _function.image: function[MODULE] = os.path.basename(_function.image) for _callee in _callees.itervalues(): call = Call(_callee.id) call[SAMPLES] = _callee.samples function.add_call(call) # compute derived data profile.validate() profile.find_cycles() profile.ratio(TIME_RATIO, SAMPLES) profile.call_ratios(SAMPLES) profile.integrate(TOTAL_TIME_RATIO, TIME_RATIO) return profile class PstatsParser: """Parser python profiling statistics saved with te pstats module.""" def __init__(self, *filename): import pstats self.stats = pstats.Stats(*filename) self.profile = Profile() self.function_ids = {} def get_function_name(self, (filename, line, name)): module = os.path.splitext(filename)[0] module = os.path.basename(module) return "%s:%d:%s" % (module, line, name) def get_function(self, key): try: id = self.function_ids[key] except KeyError: id = len(self.function_ids) name = self.get_function_name(key) function = Function(id, name) self.profile.functions[id] = function self.function_ids[key] = id else: function = self.profile.functions[id] return function def parse(self): self.profile[TIME] = 0.0 self.profile[TOTAL_TIME] = self.stats.total_tt for fn, (cc, nc, tt, ct, callers) in self.stats.stats.iteritems(): callee = self.get_function(fn) callee[CALLS] = nc callee[TOTAL_TIME] = ct callee[TIME] = tt self.profile[TIME] += tt self.profile[TOTAL_TIME] = max(self.profile[TOTAL_TIME], ct) for fn, value in callers.iteritems(): caller = self.get_function(fn) call = Call(callee.id) if isinstance(value, tuple): for i in xrange(0, len(value), 4): nc, cc, tt, ct = value[i:i+4] if CALLS in call: call[CALLS] += cc else: call[CALLS] = cc if TOTAL_TIME in call: call[TOTAL_TIME] += ct else: call[TOTAL_TIME] = ct else: call[CALLS] = value call[TOTAL_TIME] = 
ratio(value, nc)*ct caller.add_call(call) #self.stats.print_stats() #self.stats.print_callees() # Compute derived events self.profile.validate() self.profile.ratio(TIME_RATIO, TIME) self.profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME) return self.profile class Theme: def __init__(self, bgcolor = (0.0, 0.0, 1.0), mincolor = (0.0, 0.0, 0.0), maxcolor = (0.0, 0.0, 1.0), fontname = "Arial", minfontsize = 10.0, maxfontsize = 10.0, minpenwidth = 0.5, maxpenwidth = 4.0, gamma = 2.2): self.bgcolor = bgcolor self.mincolor = mincolor self.maxcolor = maxcolor self.fontname = fontname self.minfontsize = minfontsize self.maxfontsize = maxfontsize self.minpenwidth = minpenwidth self.maxpenwidth = maxpenwidth self.gamma = gamma def graph_bgcolor(self): return self.hsl_to_rgb(*self.bgcolor) def graph_fontname(self): return self.fontname def graph_fontsize(self): return self.minfontsize def node_bgcolor(self, weight): return self.color(weight) def node_fgcolor(self, weight): return self.graph_bgcolor() def node_fontsize(self, weight): return self.fontsize(weight) def edge_color(self, weight): return self.color(weight) def edge_fontsize(self, weight): return self.fontsize(weight) def edge_penwidth(self, weight): return max(weight*self.maxpenwidth, self.minpenwidth) def edge_arrowsize(self, weight): return 0.5 * math.sqrt(self.edge_penwidth(weight)) def fontsize(self, weight): return max(weight**2 * self.maxfontsize, self.minfontsize) def color(self, weight): weight = min(max(weight, 0.0), 1.0) hmin, smin, lmin = self.mincolor hmax, smax, lmax = self.maxcolor h = hmin + weight*(hmax - hmin) s = smin + weight*(smax - smin) l = lmin + weight*(lmax - lmin) return self.hsl_to_rgb(h, s, l) def hsl_to_rgb(self, h, s, l): """Convert a color from HSL color-model to RGB. 
See also: - http://www.w3.org/TR/css3-color/#hsl-color """ h = h % 1.0 s = min(max(s, 0.0), 1.0) l = min(max(l, 0.0), 1.0) if l <= 0.5: m2 = l*(s + 1.0) else: m2 = l + s - l*s m1 = l*2.0 - m2 r = self._hue_to_rgb(m1, m2, h + 1.0/3.0) g = self._hue_to_rgb(m1, m2, h) b = self._hue_to_rgb(m1, m2, h - 1.0/3.0) # Apply gamma correction r **= self.gamma g **= self.gamma b **= self.gamma return (r, g, b) def _hue_to_rgb(self, m1, m2, h): if h < 0.0: h += 1.0 elif h > 1.0: h -= 1.0 if h*6 < 1.0: return m1 + (m2 - m1)*h*6.0 elif h*2 < 1.0: return m2 elif h*3 < 2.0: return m1 + (m2 - m1)*(2.0/3.0 - h)*6.0 else: return m1 TEMPERATURE_COLORMAP = Theme( mincolor = (2.0/3.0, 0.80, 0.25), # dark blue maxcolor = (0.0, 1.0, 0.5), # satured red gamma = 1.0 ) PINK_COLORMAP = Theme( mincolor = (0.0, 1.0, 0.90), # pink maxcolor = (0.0, 1.0, 0.5), # satured red ) GRAY_COLORMAP = Theme( mincolor = (0.0, 0.0, 0.85), # light gray maxcolor = (0.0, 0.0, 0.0), # black ) BW_COLORMAP = Theme( minfontsize = 8.0, maxfontsize = 24.0, mincolor = (0.0, 0.0, 0.0), # black maxcolor = (0.0, 0.0, 0.0), # black minpenwidth = 0.1, maxpenwidth = 8.0, ) class DotWriter: """Writer for the DOT language. 
See also: - "The DOT Language" specification http://www.graphviz.org/doc/info/lang.html """ def __init__(self, fp): self.fp = fp def graph(self, profile, theme): self.begin_graph() fontname = theme.graph_fontname() self.attr('graph', fontname=fontname, ranksep=0.25, nodesep=0.125) self.attr('node', fontname=fontname, shape="box", style="filled,rounded", fontcolor="white", width=0, height=0) self.attr('edge', fontname=fontname) for function in profile.functions.itervalues(): labels = [] for event in PROCESS, MODULE: if event in function.events: label = event.format(function[event]) labels.append(label) labels.append(function.name) for event in TOTAL_TIME_RATIO, TIME_RATIO, CALLS: if event in function.events: label = event.format(function[event]) labels.append(label) try: weight = function[PRUNE_RATIO] except UndefinedEvent: weight = 0.0 label = '\n'.join(labels) self.node(function.id, label = label, color = self.color(theme.node_bgcolor(weight)), fontcolor = self.color(theme.node_fgcolor(weight)), fontsize = "%.2f" % theme.node_fontsize(weight), ) for call in function.calls.itervalues(): callee = profile.functions[call.callee_id] labels = [] for event in TOTAL_TIME_RATIO, CALLS: if event in call.events: label = event.format(call[event]) labels.append(label) try: weight = call[PRUNE_RATIO] except UndefinedEvent: try: weight = callee[PRUNE_RATIO] except UndefinedEvent: weight = 0.0 label = '\n'.join(labels) self.edge(function.id, call.callee_id, label = label, color = self.color(theme.edge_color(weight)), fontcolor = self.color(theme.edge_color(weight)), fontsize = "%.2f" % theme.edge_fontsize(weight), penwidth = "%.2f" % theme.edge_penwidth(weight), labeldistance = "%.2f" % theme.edge_penwidth(weight), arrowsize = "%.2f" % theme.edge_arrowsize(weight), ) self.end_graph() def begin_graph(self): self.write('digraph {\n') def end_graph(self): self.write('}\n') def attr(self, what, **attrs): self.write("\t") self.write(what) self.attr_list(attrs) self.write(";\n") def 
node(self, node, **attrs): self.write("\t") self.id(node) self.attr_list(attrs) self.write(";\n") def edge(self, src, dst, **attrs): self.write("\t") self.id(src) self.write(" -> ") self.id(dst) self.attr_list(attrs) self.write(";\n") def attr_list(self, attrs): if not attrs: return self.write(' [') first = True for name, value in attrs.iteritems(): if first: first = False else: self.write(", ") self.id(name) self.write('=') self.id(value) self.write(']') def id(self, id): if isinstance(id, (int, float)): s = str(id) elif isinstance(id, str): if id.isalnum(): s = id else: s = self.escape(id) else: raise TypeError self.write(s) def color(self, (r, g, b)): def float2int(f): if f <= 0.0: return 0 if f >= 1.0: return 255 return int(255.0*f + 0.5) return "#" + "".join(["%02x" % float2int(c) for c in (r, g, b)]) def escape(self, s): s = s.encode('utf-8') s = s.replace('\\', r'\\') s = s.replace('\n', r'\n') s = s.replace('\t', r'\t') s = s.replace('"', r'\"') return '"' + s + '"' def write(self, s): self.fp.write(s) class Main: """Main program.""" themes = { "color": TEMPERATURE_COLORMAP, "pink": PINK_COLORMAP, "gray": GRAY_COLORMAP, "bw": BW_COLORMAP, } def main(self): """Main program.""" parser = optparse.OptionParser( usage="\n\t%prog [options] [file] ...", version="%%prog %s" % __version__) parser.add_option( '-o', '--output', metavar='FILE', type="string", dest="output", help="output filename [stdout]") parser.add_option( '-n', '--node-thres', metavar='PERCENTAGE', type="float", dest="node_thres", default=0.5, help="eliminate nodes below this threshold [default: %default]") parser.add_option( '-e', '--edge-thres', metavar='PERCENTAGE', type="float", dest="edge_thres", default=0.1, help="eliminate edges below this threshold [default: %default]") parser.add_option( '-f', '--format', type="choice", choices=('prof', 'oprofile', 'pstats', 'shark'), dest="format", default="prof", help="profile format: prof, oprofile, or pstats [default: %default]") parser.add_option( 
'-c', '--colormap', type="choice", choices=('color', 'pink', 'gray', 'bw'), dest="theme", default="color", help="color map: color, pink, gray, or bw [default: %default]") parser.add_option( '-s', '--strip', action="store_true", dest="strip", default=False, help="strip function parameters, template parameters, and const modifiers from demangled C++ function names") parser.add_option( '-w', '--wrap', action="store_true", dest="wrap", default=False, help="wrap function names") (self.options, self.args) = parser.parse_args(sys.argv[1:]) if len(self.args) > 1 and self.options.format != 'pstats': parser.error('incorrect number of arguments') try: self.theme = self.themes[self.options.theme] except KeyError: parser.error('invalid colormap \'%s\'' % self.options.theme) if self.options.format == 'prof': if not self.args: fp = sys.stdin else: fp = open(self.args[0], 'rt') parser = GprofParser(fp) elif self.options.format == 'oprofile': if not self.args: fp = sys.stdin else: fp = open(self.args[0], 'rt') parser = OprofileParser(fp) elif self.options.format == 'pstats': if not self.args: parser.error('at least a file must be specified for pstats input') parser = PstatsParser(*self.args) elif self.options.format == 'shark': if not self.args: fp = sys.stdin else: fp = open(self.args[0], 'rt') parser = SharkParser(fp) else: parser.error('invalid format \'%s\'' % self.options.format) self.profile = parser.parse() if self.options.output is None: self.output = sys.stdout else: self.output = open(self.options.output, 'wt') self.write_graph() _parenthesis_re = re.compile(r'\([^()]*\)') _angles_re = re.compile(r'<[^<>]*>') _const_re = re.compile(r'\s+const$') def strip_function_name(self, name): """Remove extraneous information from C++ demangled function names.""" # Strip function parameters from name by recursively removing paired parenthesis while True: name, n = self._parenthesis_re.subn('', name) if not n: break # Strip const qualifier name = self._const_re.sub('', name) # Strip 
template parameters from name by recursively removing paired angles while True: name, n = self._angles_re.subn('', name) if not n: break return name def wrap_function_name(self, name): """Split the function name on multiple lines.""" if len(name) > 32: ratio = 2.0/3.0 height = max(int(len(name)/(1.0 - ratio) + 0.5), 1) width = max(len(name)/height, 32) # TODO: break lines in symbols name = textwrap.fill(name, width, break_long_words=False) # Take away spaces name = name.replace(", ", ",") name = name.replace("> >", ">>") name = name.replace("> >", ">>") # catch consecutive return name def compress_function_name(self, name): """Compress function name according to the user preferences.""" if self.options.strip: name = self.strip_function_name(name) if self.options.wrap: name = self.wrap_function_name(name) # TODO: merge functions with same resulting name return name def write_graph(self): dot = DotWriter(self.output) profile = self.profile profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0) for function in profile.functions.itervalues(): function.name = self.compress_function_name(function.name) dot.graph(profile, self.theme) if __name__ == '__main__': Main().main()
53,218
Python
.py
1,328
28.725151
154
0.549634
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,395
runtests.py
midgetspy_Sick-Beard/lib/tvdb_api/tests/runtests.py
#!/usr/bin/env python #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) import sys import unittest import test_tvdb_api def main(): suite = unittest.TestSuite([ unittest.TestLoader().loadTestsFromModule(test_tvdb_api) ]) runner = unittest.TextTestRunner(verbosity=2) result = runner.run(suite) if result.wasSuccessful(): return 0 else: return 1 if __name__ == '__main__': sys.exit( int(main()) )
555
Python
.py
23
19.956522
64
0.680688
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,396
__init__.py
midgetspy_Sick-Beard/lib/pythontwitter/__init__.py
#!/usr/bin/env python # # vim: sw=2 ts=2 sts=2 # # Copyright 2007 The Python-Twitter Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''A library that provides a Python interface to the Twitter API''' __author__ = '[email protected]' __version__ = '1.0.1' import base64 import calendar import datetime import httplib import os import rfc822 import sys import tempfile import textwrap import time import urllib import urllib2 import urlparse import gzip import StringIO try: # Python >= 2.6 import json as simplejson except ImportError: try: # Python < 2.6 import lib.simplejson as simplejson except ImportError: try: # Google App Engine from django.utils import simplejson except ImportError: raise ImportError, "Unable to load a json library" # parse_qsl moved to urlparse module in v2.6 try: from urlparse import parse_qsl, parse_qs except ImportError: from cgi import parse_qsl, parse_qs try: from hashlib import md5 except ImportError: from md5 import md5 import lib.oauth2 as oauth CHARACTER_LIMIT = 140 # A singleton representing a lazily instantiated FileCache. 
DEFAULT_CACHE = object() REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token' ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token' AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize' SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate' class TwitterError(Exception): '''Base class for Twitter errors''' @property def message(self): '''Returns the first argument used to construct this error.''' return self.args[0] class Status(object): '''A class representing the Status structure used by the twitter API. The Status structure exposes the following properties: status.created_at status.created_at_in_seconds # read only status.favorited status.favorite_count status.in_reply_to_screen_name status.in_reply_to_user_id status.in_reply_to_status_id status.truncated status.source status.id status.text status.location status.relative_created_at # read only status.user status.urls status.user_mentions status.hashtags status.geo status.place status.coordinates status.contributors ''' def __init__(self, created_at=None, favorited=None, favorite_count=None, id=None, text=None, location=None, user=None, in_reply_to_screen_name=None, in_reply_to_user_id=None, in_reply_to_status_id=None, truncated=None, source=None, now=None, urls=None, user_mentions=None, hashtags=None, media=None, geo=None, place=None, coordinates=None, contributors=None, retweeted=None, retweeted_status=None, current_user_retweet=None, retweet_count=None, possibly_sensitive=None, scopes=None, withheld_copyright=None, withheld_in_countries=None, withheld_scope=None): '''An object to hold a Twitter status message. This class is normally instantiated by the twitter.Api class and returned in a sequence. Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007" Args: created_at: The time this status message was posted. [Optional] favorited: Whether this is a favorite of the authenticated user. 
[Optional] favorite_count: Number of times this status message has been favorited. [Optional] id: The unique id of this status message. [Optional] text: The text of this status message. [Optional] location: the geolocation string associated with this message. [Optional] relative_created_at: A human readable string representing the posting time. [Optional] user: A twitter.User instance representing the person posting the message. [Optional] now: The current time, if the client chooses to set it. Defaults to the wall clock time. [Optional] urls: user_mentions: hashtags: geo: place: coordinates: contributors: retweeted: retweeted_status: current_user_retweet: retweet_count: possibly_sensitive: scopes: withheld_copyright: withheld_in_countries: withheld_scope: ''' self.created_at = created_at self.favorited = favorited self.favorite_count = favorite_count self.id = id self.text = text self.location = location self.user = user self.now = now self.in_reply_to_screen_name = in_reply_to_screen_name self.in_reply_to_user_id = in_reply_to_user_id self.in_reply_to_status_id = in_reply_to_status_id self.truncated = truncated self.retweeted = retweeted self.source = source self.urls = urls self.user_mentions = user_mentions self.hashtags = hashtags self.media = media self.geo = geo self.place = place self.coordinates = coordinates self.contributors = contributors self.retweeted_status = retweeted_status self.current_user_retweet = current_user_retweet self.retweet_count = retweet_count self.possibly_sensitive = possibly_sensitive self.scopes = scopes self.withheld_copyright = withheld_copyright self.withheld_in_countries = withheld_in_countries self.withheld_scope = withheld_scope def GetCreatedAt(self): '''Get the time this status message was posted. Returns: The time this status message was posted ''' return self._created_at def SetCreatedAt(self, created_at): '''Set the time this status message was posted. 
Args: created_at: The time this status message was created ''' self._created_at = created_at created_at = property(GetCreatedAt, SetCreatedAt, doc='The time this status message was posted.') def GetCreatedAtInSeconds(self): '''Get the time this status message was posted, in seconds since the epoch. Returns: The time this status message was posted, in seconds since the epoch. ''' return calendar.timegm(rfc822.parsedate(self.created_at)) created_at_in_seconds = property(GetCreatedAtInSeconds, doc="The time this status message was " "posted, in seconds since the epoch") def GetFavorited(self): '''Get the favorited setting of this status message. Returns: True if this status message is favorited; False otherwise ''' return self._favorited def SetFavorited(self, favorited): '''Set the favorited state of this status message. Args: favorited: boolean True/False favorited state of this status message ''' self._favorited = favorited favorited = property(GetFavorited, SetFavorited, doc='The favorited state of this status message.') def GetFavoriteCount(self): '''Get the favorite count of this status message. Returns: number of times this status message has been favorited ''' return self._favorite_count def SetFavoriteCount(self, favorite_count): '''Set the favorited state of this status message. Args: favorite_count: int number of favorites for this status message ''' self._favorite_count = favorite_count favorite_count = property(GetFavoriteCount, SetFavoriteCount, doc='The number of favorites for this status message.') def GetId(self): '''Get the unique id of this status message. Returns: The unique id of this status message ''' return self._id def SetId(self, id): '''Set the unique id of this status message. 
Args: id: The unique id of this status message ''' self._id = id id = property(GetId, SetId, doc='The unique id of this status message.') def GetInReplyToScreenName(self): return self._in_reply_to_screen_name def SetInReplyToScreenName(self, in_reply_to_screen_name): self._in_reply_to_screen_name = in_reply_to_screen_name in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName, doc='') def GetInReplyToUserId(self): return self._in_reply_to_user_id def SetInReplyToUserId(self, in_reply_to_user_id): self._in_reply_to_user_id = in_reply_to_user_id in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId, doc='') def GetInReplyToStatusId(self): return self._in_reply_to_status_id def SetInReplyToStatusId(self, in_reply_to_status_id): self._in_reply_to_status_id = in_reply_to_status_id in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId, doc='') def GetTruncated(self): return self._truncated def SetTruncated(self, truncated): self._truncated = truncated truncated = property(GetTruncated, SetTruncated, doc='') def GetRetweeted(self): return self._retweeted def SetRetweeted(self, retweeted): self._retweeted = retweeted retweeted = property(GetRetweeted, SetRetweeted, doc='') def GetSource(self): return self._source def SetSource(self, source): self._source = source source = property(GetSource, SetSource, doc='') def GetText(self): '''Get the text of this status message. Returns: The text of this status message. ''' return self._text def SetText(self, text): '''Set the text of this status message. Args: text: The text of this status message ''' self._text = text text = property(GetText, SetText, doc='The text of this status message') def GetLocation(self): '''Get the geolocation associated with this status message Returns: The geolocation string of this status message. 
''' return self._location def SetLocation(self, location): '''Set the geolocation associated with this status message Args: location: The geolocation string of this status message ''' self._location = location location = property(GetLocation, SetLocation, doc='The geolocation string of this status message') def GetRelativeCreatedAt(self): '''Get a human readable string representing the posting time Returns: A human readable string representing the posting time ''' fudge = 1.25 delta = long(self.now) - long(self.created_at_in_seconds) if delta < (1 * fudge): return 'about a second ago' elif delta < (60 * (1/fudge)): return 'about %d seconds ago' % (delta) elif delta < (60 * fudge): return 'about a minute ago' elif delta < (60 * 60 * (1/fudge)): return 'about %d minutes ago' % (delta / 60) elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1: return 'about an hour ago' elif delta < (60 * 60 * 24 * (1/fudge)): return 'about %d hours ago' % (delta / (60 * 60)) elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1: return 'about a day ago' else: return 'about %d days ago' % (delta / (60 * 60 * 24)) relative_created_at = property(GetRelativeCreatedAt, doc='Get a human readable string representing ' 'the posting time') def GetUser(self): '''Get a twitter.User representing the entity posting this status message. Returns: A twitter.User representing the entity posting this status message ''' return self._user def SetUser(self, user): '''Set a twitter.User representing the entity posting this status message. Args: user: A twitter.User representing the entity posting this status message ''' self._user = user user = property(GetUser, SetUser, doc='A twitter.User representing the entity posting this ' 'status message') def GetNow(self): '''Get the wallclock time for this status message. Used to calculate relative_created_at. Defaults to the time the object was instantiated. 
Returns: Whatever the status instance believes the current time to be, in seconds since the epoch. ''' if self._now is None: self._now = time.time() return self._now def SetNow(self, now): '''Set the wallclock time for this status message. Used to calculate relative_created_at. Defaults to the time the object was instantiated. Args: now: The wallclock time for this instance. ''' self._now = now now = property(GetNow, SetNow, doc='The wallclock time for this status instance.') def GetGeo(self): return self._geo def SetGeo(self, geo): self._geo = geo geo = property(GetGeo, SetGeo, doc='') def GetPlace(self): return self._place def SetPlace(self, place): self._place = place place = property(GetPlace, SetPlace, doc='') def GetCoordinates(self): return self._coordinates def SetCoordinates(self, coordinates): self._coordinates = coordinates coordinates = property(GetCoordinates, SetCoordinates, doc='') def GetContributors(self): return self._contributors def SetContributors(self, contributors): self._contributors = contributors contributors = property(GetContributors, SetContributors, doc='') def GetRetweeted_status(self): return self._retweeted_status def SetRetweeted_status(self, retweeted_status): self._retweeted_status = retweeted_status retweeted_status = property(GetRetweeted_status, SetRetweeted_status, doc='') def GetRetweetCount(self): return self._retweet_count def SetRetweetCount(self, retweet_count): self._retweet_count = retweet_count retweet_count = property(GetRetweetCount, SetRetweetCount, doc='') def GetCurrent_user_retweet(self): return self._current_user_retweet def SetCurrent_user_retweet(self, current_user_retweet): self._current_user_retweet = current_user_retweet current_user_retweet = property(GetCurrent_user_retweet, SetCurrent_user_retweet, doc='') def GetPossibly_sensitive(self): return self._possibly_sensitive def SetPossibly_sensitive(self, possibly_sensitive): self._possibly_sensitive = possibly_sensitive possibly_sensitive = 
property(GetPossibly_sensitive, SetPossibly_sensitive, doc='') def GetScopes(self): return self._scopes def SetScopes(self, scopes): self._scopes = scopes scopes = property(GetScopes, SetScopes, doc='') def GetWithheld_copyright(self): return self._withheld_copyright def SetWithheld_copyright(self, withheld_copyright): self._withheld_copyright = withheld_copyright withheld_copyright = property(GetWithheld_copyright, SetWithheld_copyright, doc='') def GetWithheld_in_countries(self): return self._withheld_in_countries def SetWithheld_in_countries(self, withheld_in_countries): self._withheld_in_countries = withheld_in_countries withheld_in_countries = property(GetWithheld_in_countries, SetWithheld_in_countries, doc='') def GetWithheld_scope(self): return self._withheld_scope def SetWithheld_scope(self, withheld_scope): self._withheld_scope = withheld_scope withheld_scope = property(GetWithheld_scope, SetWithheld_scope, doc='') def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.created_at == other.created_at and \ self.id == other.id and \ self.text == other.text and \ self.location == other.location and \ self.user == other.user and \ self.in_reply_to_screen_name == other.in_reply_to_screen_name and \ self.in_reply_to_user_id == other.in_reply_to_user_id and \ self.in_reply_to_status_id == other.in_reply_to_status_id and \ self.truncated == other.truncated and \ self.retweeted == other.retweeted and \ self.favorited == other.favorited and \ self.favorite_count == other.favorite_count and \ self.source == other.source and \ self.geo == other.geo and \ self.place == other.place and \ self.coordinates == other.coordinates and \ self.contributors == other.contributors and \ self.retweeted_status == other.retweeted_status and \ self.retweet_count == other.retweet_count and \ self.current_user_retweet == other.current_user_retweet and \ self.possibly_sensitive == other.possibly_sensitive and \ self.scopes == 
other.scopes and \ self.withheld_copyright == other.withheld_copyright and \ self.withheld_in_countries == other.withheld_in_countries and \ self.withheld_scope == other.withheld_scope except AttributeError: return False def __str__(self): '''A string representation of this twitter.Status instance. The return value is the same as the JSON string representation. Returns: A string representation of this twitter.Status instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this twitter.Status instance. Returns: A JSON string representation of this twitter.Status instance ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): '''A dict representation of this twitter.Status instance. The return value uses the same key names as the JSON representation. Return: A dict representing this twitter.Status instance ''' data = {} if self.created_at: data['created_at'] = self.created_at if self.favorited: data['favorited'] = self.favorited if self.favorite_count: data['favorite_count'] = self.favorite_count if self.id: data['id'] = self.id if self.text: data['text'] = self.text if self.location: data['location'] = self.location if self.user: data['user'] = self.user.AsDict() if self.in_reply_to_screen_name: data['in_reply_to_screen_name'] = self.in_reply_to_screen_name if self.in_reply_to_user_id: data['in_reply_to_user_id'] = self.in_reply_to_user_id if self.in_reply_to_status_id: data['in_reply_to_status_id'] = self.in_reply_to_status_id if self.truncated is not None: data['truncated'] = self.truncated if self.retweeted is not None: data['retweeted'] = self.retweeted if self.favorited is not None: data['favorited'] = self.favorited if self.source: data['source'] = self.source if self.geo: data['geo'] = self.geo if self.place: data['place'] = self.place if self.coordinates: data['coordinates'] = self.coordinates if self.contributors: data['contributors'] = self.contributors if self.hashtags: data['hashtags'] = 
[h.text for h in self.hashtags] if self.retweeted_status: data['retweeted_status'] = self.retweeted_status.AsDict() if self.retweet_count: data['retweet_count'] = self.retweet_count if self.urls: data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls]) if self.user_mentions: data['user_mentions'] = [um.AsDict() for um in self.user_mentions] if self.current_user_retweet: data['current_user_retweet'] = self.current_user_retweet if self.possibly_sensitive: data['possibly_sensitive'] = self.possibly_sensitive if self.scopes: data['scopes'] = self.scopes if self.withheld_copyright: data['withheld_copyright'] = self.withheld_copyright if self.withheld_in_countries: data['withheld_in_countries'] = self.withheld_in_countries if self.withheld_scope: data['withheld_scope'] = self.withheld_scope return data @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.Status instance ''' if 'user' in data: user = User.NewFromJsonDict(data['user']) else: user = None if 'retweeted_status' in data: retweeted_status = Status.NewFromJsonDict(data['retweeted_status']) else: retweeted_status = None if 'current_user_retweet' in data: current_user_retweet = data['current_user_retweet']['id'] else: current_user_retweet = None urls = None user_mentions = None hashtags = None media = None if 'entities' in data: if 'urls' in data['entities']: urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']] if 'user_mentions' in data['entities']: user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']] if 'hashtags' in data['entities']: hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']] if 'media' in data['entities']: media = data['entities']['media'] else: media = [] return Status(created_at=data.get('created_at', None), favorited=data.get('favorited', None), favorite_count=data.get('favorite_count', 
None), id=data.get('id', None), text=data.get('text', None), location=data.get('location', None), in_reply_to_screen_name=data.get('in_reply_to_screen_name', None), in_reply_to_user_id=data.get('in_reply_to_user_id', None), in_reply_to_status_id=data.get('in_reply_to_status_id', None), truncated=data.get('truncated', None), retweeted=data.get('retweeted', None), source=data.get('source', None), user=user, urls=urls, user_mentions=user_mentions, hashtags=hashtags, media=media, geo=data.get('geo', None), place=data.get('place', None), coordinates=data.get('coordinates', None), contributors=data.get('contributors', None), retweeted_status=retweeted_status, current_user_retweet=current_user_retweet, retweet_count=data.get('retweet_count', None), possibly_sensitive=data.get('possibly_sensitive', None), scopes=data.get('scopes', None), withheld_copyright=data.get('withheld_copyright', None), withheld_in_countries=data.get('withheld_in_countries', None), withheld_scope=data.get('withheld_scope', None)) class User(object): '''A class representing the User structure used by the twitter API. 
The User structure exposes the following properties: user.id user.name user.screen_name user.location user.description user.profile_image_url user.profile_background_tile user.profile_background_image_url user.profile_sidebar_fill_color user.profile_background_color user.profile_link_color user.profile_text_color user.protected user.utc_offset user.time_zone user.url user.status user.statuses_count user.followers_count user.friends_count user.favourites_count user.geo_enabled user.verified user.lang user.notifications user.contributors_enabled user.created_at user.listed_count ''' def __init__(self, id=None, name=None, screen_name=None, location=None, description=None, profile_image_url=None, profile_background_tile=None, profile_background_image_url=None, profile_sidebar_fill_color=None, profile_background_color=None, profile_link_color=None, profile_text_color=None, protected=None, utc_offset=None, time_zone=None, followers_count=None, friends_count=None, statuses_count=None, favourites_count=None, url=None, status=None, geo_enabled=None, verified=None, lang=None, notifications=None, contributors_enabled=None, created_at=None, listed_count=None): self.id = id self.name = name self.screen_name = screen_name self.location = location self.description = description self.profile_image_url = profile_image_url self.profile_background_tile = profile_background_tile self.profile_background_image_url = profile_background_image_url self.profile_sidebar_fill_color = profile_sidebar_fill_color self.profile_background_color = profile_background_color self.profile_link_color = profile_link_color self.profile_text_color = profile_text_color self.protected = protected self.utc_offset = utc_offset self.time_zone = time_zone self.followers_count = followers_count self.friends_count = friends_count self.statuses_count = statuses_count self.favourites_count = favourites_count self.url = url self.status = status self.geo_enabled = geo_enabled self.verified = verified self.lang = lang 
self.notifications = notifications self.contributors_enabled = contributors_enabled self.created_at = created_at self.listed_count = listed_count def GetId(self): '''Get the unique id of this user. Returns: The unique id of this user ''' return self._id def SetId(self, id): '''Set the unique id of this user. Args: id: The unique id of this user. ''' self._id = id id = property(GetId, SetId, doc='The unique id of this user.') def GetName(self): '''Get the real name of this user. Returns: The real name of this user ''' return self._name def SetName(self, name): '''Set the real name of this user. Args: name: The real name of this user ''' self._name = name name = property(GetName, SetName, doc='The real name of this user.') def GetScreenName(self): '''Get the short twitter name of this user. Returns: The short twitter name of this user ''' return self._screen_name def SetScreenName(self, screen_name): '''Set the short twitter name of this user. Args: screen_name: the short twitter name of this user ''' self._screen_name = screen_name screen_name = property(GetScreenName, SetScreenName, doc='The short twitter name of this user.') def GetLocation(self): '''Get the geographic location of this user. Returns: The geographic location of this user ''' return self._location def SetLocation(self, location): '''Set the geographic location of this user. Args: location: The geographic location of this user ''' self._location = location location = property(GetLocation, SetLocation, doc='The geographic location of this user.') def GetDescription(self): '''Get the short text description of this user. Returns: The short text description of this user ''' return self._description def SetDescription(self, description): '''Set the short text description of this user. 
Args: description: The short text description of this user ''' self._description = description description = property(GetDescription, SetDescription, doc='The short text description of this user.') def GetUrl(self): '''Get the homepage url of this user. Returns: The homepage url of this user ''' return self._url def SetUrl(self, url): '''Set the homepage url of this user. Args: url: The homepage url of this user ''' self._url = url url = property(GetUrl, SetUrl, doc='The homepage url of this user.') def GetProfileImageUrl(self): '''Get the url of the thumbnail of this user. Returns: The url of the thumbnail of this user ''' return self._profile_image_url def SetProfileImageUrl(self, profile_image_url): '''Set the url of the thumbnail of this user. Args: profile_image_url: The url of the thumbnail of this user ''' self._profile_image_url = profile_image_url profile_image_url= property(GetProfileImageUrl, SetProfileImageUrl, doc='The url of the thumbnail of this user.') def GetProfileBackgroundTile(self): '''Boolean for whether to tile the profile background image. Returns: True if the background is to be tiled, False if not, None if unset. ''' return self._profile_background_tile def SetProfileBackgroundTile(self, profile_background_tile): '''Set the boolean flag for whether to tile the profile background image. Args: profile_background_tile: Boolean flag for whether to tile or not. 
''' self._profile_background_tile = profile_background_tile profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile, doc='Boolean for whether to tile the background image.') def GetProfileBackgroundImageUrl(self): return self._profile_background_image_url def SetProfileBackgroundImageUrl(self, profile_background_image_url): self._profile_background_image_url = profile_background_image_url profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl, doc='The url of the profile background of this user.') def GetProfileSidebarFillColor(self): return self._profile_sidebar_fill_color def SetProfileSidebarFillColor(self, profile_sidebar_fill_color): self._profile_sidebar_fill_color = profile_sidebar_fill_color profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor) def GetProfileBackgroundColor(self): return self._profile_background_color def SetProfileBackgroundColor(self, profile_background_color): self._profile_background_color = profile_background_color profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor) def GetProfileLinkColor(self): return self._profile_link_color def SetProfileLinkColor(self, profile_link_color): self._profile_link_color = profile_link_color profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor) def GetProfileTextColor(self): return self._profile_text_color def SetProfileTextColor(self, profile_text_color): self._profile_text_color = profile_text_color profile_text_color = property(GetProfileTextColor, SetProfileTextColor) def GetProtected(self): return self._protected def SetProtected(self, protected): self._protected = protected protected = property(GetProtected, SetProtected) def GetUtcOffset(self): return self._utc_offset def SetUtcOffset(self, utc_offset): self._utc_offset = utc_offset utc_offset = property(GetUtcOffset, SetUtcOffset) def GetTimeZone(self): '''Returns the current 
time zone string for the user. Returns: The descriptive time zone string for the user. ''' return self._time_zone def SetTimeZone(self, time_zone): '''Sets the user's time zone string. Args: time_zone: The descriptive time zone to assign for the user. ''' self._time_zone = time_zone time_zone = property(GetTimeZone, SetTimeZone) def GetStatus(self): '''Get the latest twitter.Status of this user. Returns: The latest twitter.Status of this user ''' return self._status def SetStatus(self, status): '''Set the latest twitter.Status of this user. Args: status: The latest twitter.Status of this user ''' self._status = status status = property(GetStatus, SetStatus, doc='The latest twitter.Status of this user.') def GetFriendsCount(self): '''Get the friend count for this user. Returns: The number of users this user has befriended. ''' return self._friends_count def SetFriendsCount(self, count): '''Set the friend count for this user. Args: count: The number of users this user has befriended. ''' self._friends_count = count friends_count = property(GetFriendsCount, SetFriendsCount, doc='The number of friends for this user.') def GetListedCount(self): '''Get the listed count for this user. Returns: The number of lists this user belongs to. ''' return self._listed_count def SetListedCount(self, count): '''Set the listed count for this user. Args: count: The number of lists this user belongs to. ''' self._listed_count = count listed_count = property(GetListedCount, SetListedCount, doc='The number of lists this user belongs to.') def GetFollowersCount(self): '''Get the follower count for this user. Returns: The number of users following this user. ''' return self._followers_count def SetFollowersCount(self, count): '''Set the follower count for this user. Args: count: The number of users following this user. 
''' self._followers_count = count followers_count = property(GetFollowersCount, SetFollowersCount, doc='The number of users following this user.') def GetStatusesCount(self): '''Get the number of status updates for this user. Returns: The number of status updates for this user. ''' return self._statuses_count def SetStatusesCount(self, count): '''Set the status update count for this user. Args: count: The number of updates for this user. ''' self._statuses_count = count statuses_count = property(GetStatusesCount, SetStatusesCount, doc='The number of updates for this user.') def GetFavouritesCount(self): '''Get the number of favourites for this user. Returns: The number of favourites for this user. ''' return self._favourites_count def SetFavouritesCount(self, count): '''Set the favourite count for this user. Args: count: The number of favourites for this user. ''' self._favourites_count = count favourites_count = property(GetFavouritesCount, SetFavouritesCount, doc='The number of favourites for this user.') def GetGeoEnabled(self): '''Get the setting of geo_enabled for this user. Returns: True/False if Geo tagging is enabled ''' return self._geo_enabled def SetGeoEnabled(self, geo_enabled): '''Set the latest twitter.geo_enabled of this user. Args: geo_enabled: True/False if Geo tagging is to be enabled ''' self._geo_enabled = geo_enabled geo_enabled = property(GetGeoEnabled, SetGeoEnabled, doc='The value of twitter.geo_enabled for this user.') def GetVerified(self): '''Get the setting of verified for this user. Returns: True/False if user is a verified account ''' return self._verified def SetVerified(self, verified): '''Set twitter.verified for this user. Args: verified: True/False if user is a verified account ''' self._verified = verified verified = property(GetVerified, SetVerified, doc='The value of twitter.verified for this user.') def GetLang(self): '''Get the setting of lang for this user. 
Returns: language code of the user ''' return self._lang def SetLang(self, lang): '''Set twitter.lang for this user. Args: lang: language code for the user ''' self._lang = lang lang = property(GetLang, SetLang, doc='The value of twitter.lang for this user.') def GetNotifications(self): '''Get the setting of notifications for this user. Returns: True/False for the notifications setting of the user ''' return self._notifications def SetNotifications(self, notifications): '''Set twitter.notifications for this user. Args: notifications: True/False notifications setting for the user ''' self._notifications = notifications notifications = property(GetNotifications, SetNotifications, doc='The value of twitter.notifications for this user.') def GetContributorsEnabled(self): '''Get the setting of contributors_enabled for this user. Returns: True/False contributors_enabled of the user ''' return self._contributors_enabled def SetContributorsEnabled(self, contributors_enabled): '''Set twitter.contributors_enabled for this user. Args: contributors_enabled: True/False contributors_enabled setting for the user ''' self._contributors_enabled = contributors_enabled contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled, doc='The value of twitter.contributors_enabled for this user.') def GetCreatedAt(self): '''Get the setting of created_at for this user. Returns: created_at value of the user ''' return self._created_at def SetCreatedAt(self, created_at): '''Set twitter.created_at for this user. 
Args: created_at: created_at value for the user ''' self._created_at = created_at created_at = property(GetCreatedAt, SetCreatedAt, doc='The value of twitter.created_at for this user.') def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.id == other.id and \ self.name == other.name and \ self.screen_name == other.screen_name and \ self.location == other.location and \ self.description == other.description and \ self.profile_image_url == other.profile_image_url and \ self.profile_background_tile == other.profile_background_tile and \ self.profile_background_image_url == other.profile_background_image_url and \ self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \ self.profile_background_color == other.profile_background_color and \ self.profile_link_color == other.profile_link_color and \ self.profile_text_color == other.profile_text_color and \ self.protected == other.protected and \ self.utc_offset == other.utc_offset and \ self.time_zone == other.time_zone and \ self.url == other.url and \ self.statuses_count == other.statuses_count and \ self.followers_count == other.followers_count and \ self.favourites_count == other.favourites_count and \ self.friends_count == other.friends_count and \ self.status == other.status and \ self.geo_enabled == other.geo_enabled and \ self.verified == other.verified and \ self.lang == other.lang and \ self.notifications == other.notifications and \ self.contributors_enabled == other.contributors_enabled and \ self.created_at == other.created_at and \ self.listed_count == other.listed_count except AttributeError: return False def __str__(self): '''A string representation of this twitter.User instance. The return value is the same as the JSON string representation. Returns: A string representation of this twitter.User instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this twitter.User instance. 
Returns: A JSON string representation of this twitter.User instance ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): '''A dict representation of this twitter.User instance. The return value uses the same key names as the JSON representation. Return: A dict representing this twitter.User instance ''' data = {} if self.id: data['id'] = self.id if self.name: data['name'] = self.name if self.screen_name: data['screen_name'] = self.screen_name if self.location: data['location'] = self.location if self.description: data['description'] = self.description if self.profile_image_url: data['profile_image_url'] = self.profile_image_url if self.profile_background_tile is not None: data['profile_background_tile'] = self.profile_background_tile if self.profile_background_image_url: data['profile_sidebar_fill_color'] = self.profile_background_image_url if self.profile_background_color: data['profile_background_color'] = self.profile_background_color if self.profile_link_color: data['profile_link_color'] = self.profile_link_color if self.profile_text_color: data['profile_text_color'] = self.profile_text_color if self.protected is not None: data['protected'] = self.protected if self.utc_offset: data['utc_offset'] = self.utc_offset if self.time_zone: data['time_zone'] = self.time_zone if self.url: data['url'] = self.url if self.status: data['status'] = self.status.AsDict() if self.friends_count: data['friends_count'] = self.friends_count if self.followers_count: data['followers_count'] = self.followers_count if self.statuses_count: data['statuses_count'] = self.statuses_count if self.favourites_count: data['favourites_count'] = self.favourites_count if self.geo_enabled: data['geo_enabled'] = self.geo_enabled if self.verified: data['verified'] = self.verified if self.lang: data['lang'] = self.lang if self.notifications: data['notifications'] = self.notifications if self.contributors_enabled: data['contributors_enabled'] = self.contributors_enabled if 
self.created_at: data['created_at'] = self.created_at if self.listed_count: data['listed_count'] = self.listed_count return data @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.User instance ''' if 'status' in data: status = Status.NewFromJsonDict(data['status']) else: status = None return User(id=data.get('id', None), name=data.get('name', None), screen_name=data.get('screen_name', None), location=data.get('location', None), description=data.get('description', None), statuses_count=data.get('statuses_count', None), followers_count=data.get('followers_count', None), favourites_count=data.get('favourites_count', None), friends_count=data.get('friends_count', None), profile_image_url=data.get('profile_image_url_https', data.get('profile_image_url', None)), profile_background_tile = data.get('profile_background_tile', None), profile_background_image_url = data.get('profile_background_image_url', None), profile_sidebar_fill_color = data.get('profile_sidebar_fill_color', None), profile_background_color = data.get('profile_background_color', None), profile_link_color = data.get('profile_link_color', None), profile_text_color = data.get('profile_text_color', None), protected = data.get('protected', None), utc_offset = data.get('utc_offset', None), time_zone = data.get('time_zone', None), url=data.get('url', None), status=status, geo_enabled=data.get('geo_enabled', None), verified=data.get('verified', None), lang=data.get('lang', None), notifications=data.get('notifications', None), contributors_enabled=data.get('contributors_enabled', None), created_at=data.get('created_at', None), listed_count=data.get('listed_count', None)) class List(object): '''A class representing the List structure used by the twitter API. 
The List structure exposes the following properties: list.id list.name list.slug list.description list.full_name list.mode list.uri list.member_count list.subscriber_count list.following ''' def __init__(self, id=None, name=None, slug=None, description=None, full_name=None, mode=None, uri=None, member_count=None, subscriber_count=None, following=None, user=None): self.id = id self.name = name self.slug = slug self.description = description self.full_name = full_name self.mode = mode self.uri = uri self.member_count = member_count self.subscriber_count = subscriber_count self.following = following self.user = user def GetId(self): '''Get the unique id of this list. Returns: The unique id of this list ''' return self._id def SetId(self, id): '''Set the unique id of this list. Args: id: The unique id of this list. ''' self._id = id id = property(GetId, SetId, doc='The unique id of this list.') def GetName(self): '''Get the real name of this list. Returns: The real name of this list ''' return self._name def SetName(self, name): '''Set the real name of this list. Args: name: The real name of this list ''' self._name = name name = property(GetName, SetName, doc='The real name of this list.') def GetSlug(self): '''Get the slug of this list. Returns: The slug of this list ''' return self._slug def SetSlug(self, slug): '''Set the slug of this list. Args: slug: The slug of this list. ''' self._slug = slug slug = property(GetSlug, SetSlug, doc='The slug of this list.') def GetDescription(self): '''Get the description of this list. Returns: The description of this list ''' return self._description def SetDescription(self, description): '''Set the description of this list. Args: description: The description of this list. ''' self._description = description description = property(GetDescription, SetDescription, doc='The description of this list.') def GetFull_name(self): '''Get the full_name of this list. 
Returns: The full_name of this list ''' return self._full_name def SetFull_name(self, full_name): '''Set the full_name of this list. Args: full_name: The full_name of this list. ''' self._full_name = full_name full_name = property(GetFull_name, SetFull_name, doc='The full_name of this list.') def GetMode(self): '''Get the mode of this list. Returns: The mode of this list ''' return self._mode def SetMode(self, mode): '''Set the mode of this list. Args: mode: The mode of this list. ''' self._mode = mode mode = property(GetMode, SetMode, doc='The mode of this list.') def GetUri(self): '''Get the uri of this list. Returns: The uri of this list ''' return self._uri def SetUri(self, uri): '''Set the uri of this list. Args: uri: The uri of this list. ''' self._uri = uri uri = property(GetUri, SetUri, doc='The uri of this list.') def GetMember_count(self): '''Get the member_count of this list. Returns: The member_count of this list ''' return self._member_count def SetMember_count(self, member_count): '''Set the member_count of this list. Args: member_count: The member_count of this list. ''' self._member_count = member_count member_count = property(GetMember_count, SetMember_count, doc='The member_count of this list.') def GetSubscriber_count(self): '''Get the subscriber_count of this list. Returns: The subscriber_count of this list ''' return self._subscriber_count def SetSubscriber_count(self, subscriber_count): '''Set the subscriber_count of this list. Args: subscriber_count: The subscriber_count of this list. ''' self._subscriber_count = subscriber_count subscriber_count = property(GetSubscriber_count, SetSubscriber_count, doc='The subscriber_count of this list.') def GetFollowing(self): '''Get the following status of this list. Returns: The following status of this list ''' return self._following def SetFollowing(self, following): '''Set the following status of this list. Args: following: The following of this list. 
''' self._following = following following = property(GetFollowing, SetFollowing, doc='The following status of this list.') def GetUser(self): '''Get the user of this list. Returns: The owner of this list ''' return self._user def SetUser(self, user): '''Set the user of this list. Args: user: The owner of this list. ''' self._user = user user = property(GetUser, SetUser, doc='The owner of this list.') def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.id == other.id and \ self.name == other.name and \ self.slug == other.slug and \ self.description == other.description and \ self.full_name == other.full_name and \ self.mode == other.mode and \ self.uri == other.uri and \ self.member_count == other.member_count and \ self.subscriber_count == other.subscriber_count and \ self.following == other.following and \ self.user == other.user except AttributeError: return False def __str__(self): '''A string representation of this twitter.List instance. The return value is the same as the JSON string representation. Returns: A string representation of this twitter.List instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this twitter.List instance. Returns: A JSON string representation of this twitter.List instance ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): '''A dict representation of this twitter.List instance. The return value uses the same key names as the JSON representation. 
Return: A dict representing this twitter.List instance ''' data = {} if self.id: data['id'] = self.id if self.name: data['name'] = self.name if self.slug: data['slug'] = self.slug if self.description: data['description'] = self.description if self.full_name: data['full_name'] = self.full_name if self.mode: data['mode'] = self.mode if self.uri: data['uri'] = self.uri if self.member_count is not None: data['member_count'] = self.member_count if self.subscriber_count is not None: data['subscriber_count'] = self.subscriber_count if self.following is not None: data['following'] = self.following if self.user is not None: data['user'] = self.user.AsDict() return data @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.List instance ''' if 'user' in data: user = User.NewFromJsonDict(data['user']) else: user = None return List(id=data.get('id', None), name=data.get('name', None), slug=data.get('slug', None), description=data.get('description', None), full_name=data.get('full_name', None), mode=data.get('mode', None), uri=data.get('uri', None), member_count=data.get('member_count', None), subscriber_count=data.get('subscriber_count', None), following=data.get('following', None), user=user) class DirectMessage(object): '''A class representing the DirectMessage structure used by the twitter API. The DirectMessage structure exposes the following properties: direct_message.id direct_message.created_at direct_message.created_at_in_seconds # read only direct_message.sender_id direct_message.sender_screen_name direct_message.recipient_id direct_message.recipient_screen_name direct_message.text ''' def __init__(self, id=None, created_at=None, sender_id=None, sender_screen_name=None, recipient_id=None, recipient_screen_name=None, text=None): '''An object to hold a Twitter direct message. 
This class is normally instantiated by the twitter.Api class and returned in a sequence. Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007" Args: id: The unique id of this direct message. [Optional] created_at: The time this direct message was posted. [Optional] sender_id: The id of the twitter user that sent this message. [Optional] sender_screen_name: The name of the twitter user that sent this message. [Optional] recipient_id: The id of the twitter that received this message. [Optional] recipient_screen_name: The name of the twitter that received this message. [Optional] text: The text of this direct message. [Optional] ''' self.id = id self.created_at = created_at self.sender_id = sender_id self.sender_screen_name = sender_screen_name self.recipient_id = recipient_id self.recipient_screen_name = recipient_screen_name self.text = text def GetId(self): '''Get the unique id of this direct message. Returns: The unique id of this direct message ''' return self._id def SetId(self, id): '''Set the unique id of this direct message. Args: id: The unique id of this direct message ''' self._id = id id = property(GetId, SetId, doc='The unique id of this direct message.') def GetCreatedAt(self): '''Get the time this direct message was posted. Returns: The time this direct message was posted ''' return self._created_at def SetCreatedAt(self, created_at): '''Set the time this direct message was posted. Args: created_at: The time this direct message was created ''' self._created_at = created_at created_at = property(GetCreatedAt, SetCreatedAt, doc='The time this direct message was posted.') def GetCreatedAtInSeconds(self): '''Get the time this direct message was posted, in seconds since the epoch. Returns: The time this direct message was posted, in seconds since the epoch. 
''' return calendar.timegm(rfc822.parsedate(self.created_at)) created_at_in_seconds = property(GetCreatedAtInSeconds, doc="The time this direct message was " "posted, in seconds since the epoch") def GetSenderId(self): '''Get the unique sender id of this direct message. Returns: The unique sender id of this direct message ''' return self._sender_id def SetSenderId(self, sender_id): '''Set the unique sender id of this direct message. Args: sender_id: The unique sender id of this direct message ''' self._sender_id = sender_id sender_id = property(GetSenderId, SetSenderId, doc='The unique sender id of this direct message.') def GetSenderScreenName(self): '''Get the unique sender screen name of this direct message. Returns: The unique sender screen name of this direct message ''' return self._sender_screen_name def SetSenderScreenName(self, sender_screen_name): '''Set the unique sender screen name of this direct message. Args: sender_screen_name: The unique sender screen name of this direct message ''' self._sender_screen_name = sender_screen_name sender_screen_name = property(GetSenderScreenName, SetSenderScreenName, doc='The unique sender screen name of this direct message.') def GetRecipientId(self): '''Get the unique recipient id of this direct message. Returns: The unique recipient id of this direct message ''' return self._recipient_id def SetRecipientId(self, recipient_id): '''Set the unique recipient id of this direct message. Args: recipient_id: The unique recipient id of this direct message ''' self._recipient_id = recipient_id recipient_id = property(GetRecipientId, SetRecipientId, doc='The unique recipient id of this direct message.') def GetRecipientScreenName(self): '''Get the unique recipient screen name of this direct message. Returns: The unique recipient screen name of this direct message ''' return self._recipient_screen_name def SetRecipientScreenName(self, recipient_screen_name): '''Set the unique recipient screen name of this direct message. 
Args: recipient_screen_name: The unique recipient screen name of this direct message ''' self._recipient_screen_name = recipient_screen_name recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName, doc='The unique recipient screen name of this direct message.') def GetText(self): '''Get the text of this direct message. Returns: The text of this direct message. ''' return self._text def SetText(self, text): '''Set the text of this direct message. Args: text: The text of this direct message ''' self._text = text text = property(GetText, SetText, doc='The text of this direct message') def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.id == other.id and \ self.created_at == other.created_at and \ self.sender_id == other.sender_id and \ self.sender_screen_name == other.sender_screen_name and \ self.recipient_id == other.recipient_id and \ self.recipient_screen_name == other.recipient_screen_name and \ self.text == other.text except AttributeError: return False def __str__(self): '''A string representation of this twitter.DirectMessage instance. The return value is the same as the JSON string representation. Returns: A string representation of this twitter.DirectMessage instance. ''' return self.AsJsonString() def AsJsonString(self): '''A JSON string representation of this twitter.DirectMessage instance. Returns: A JSON string representation of this twitter.DirectMessage instance ''' return simplejson.dumps(self.AsDict(), sort_keys=True) def AsDict(self): '''A dict representation of this twitter.DirectMessage instance. The return value uses the same key names as the JSON representation. 
Return: A dict representing this twitter.DirectMessage instance ''' data = {} if self.id: data['id'] = self.id if self.created_at: data['created_at'] = self.created_at if self.sender_id: data['sender_id'] = self.sender_id if self.sender_screen_name: data['sender_screen_name'] = self.sender_screen_name if self.recipient_id: data['recipient_id'] = self.recipient_id if self.recipient_screen_name: data['recipient_screen_name'] = self.recipient_screen_name if self.text: data['text'] = self.text return data @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.DirectMessage instance ''' return DirectMessage(created_at=data.get('created_at', None), recipient_id=data.get('recipient_id', None), sender_id=data.get('sender_id', None), text=data.get('text', None), sender_screen_name=data.get('sender_screen_name', None), id=data.get('id', None), recipient_screen_name=data.get('recipient_screen_name', None)) class Hashtag(object): ''' A class representing a twitter hashtag ''' def __init__(self, text=None): self.text = text @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. 
Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.Hashtag instance ''' return Hashtag(text = data.get('text', None)) class Trend(object): ''' A class representing a trending topic ''' def __init__(self, name=None, query=None, timestamp=None, url=None): self.name = name self.query = query self.timestamp = timestamp self.url = url def __str__(self): return 'Name: %s\nQuery: %s\nTimestamp: %s\nSearch URL: %s\n' % (self.name, self.query, self.timestamp, self.url) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): try: return other and \ self.name == other.name and \ self.query == other.query and \ self.timestamp == other.timestamp and \ self.url == self.url except AttributeError: return False @staticmethod def NewFromJsonDict(data, timestamp = None): '''Create a new instance based on a JSON dict Args: data: A JSON dict timestamp: Gets set as the timestamp property of the new object Returns: A twitter.Trend object ''' return Trend(name=data.get('name', None), query=data.get('query', None), url=data.get('url', None), timestamp=timestamp) class Url(object): '''A class representing an URL contained in a tweet''' def __init__(self, url=None, expanded_url=None): self.url = url self.expanded_url = expanded_url @staticmethod def NewFromJsonDict(data): '''Create a new instance based on a JSON dict. Args: data: A JSON dict, as converted from the JSON in the twitter API Returns: A twitter.Url instance ''' return Url(url=data.get('url', None), expanded_url=data.get('expanded_url', None)) class Api(object): '''A python interface into the Twitter API By default, the Api caches results for 1 minute. 
Example usage: To create an instance of the twitter.Api class, with no authentication: >>> import twitter >>> api = twitter.Api() To fetch the most recently posted public twitter status messages: >>> statuses = api.GetPublicTimeline() >>> print [s.user.name for s in statuses] [u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #... To fetch a single user's public status messages, where "user" is either a Twitter "short name" or their user id. >>> statuses = api.GetUserTimeline(user) >>> print [s.text for s in statuses] To use authentication, instantiate the twitter.Api class with a consumer key and secret; and the oAuth key and secret: >>> api = twitter.Api(consumer_key='twitter consumer key', consumer_secret='twitter consumer secret', access_token_key='the_key_given', access_token_secret='the_key_secret') To fetch your friends (after being authenticated): >>> users = api.GetFriends() >>> print [u.name for u in users] To post a twitter status message (after being authenticated): >>> status = api.PostUpdate('I love python-twitter!') >>> print status.text I love python-twitter! 
There are many other methods, including: >>> api.PostUpdates(status) >>> api.PostDirectMessage(user, text) >>> api.GetUser(user) >>> api.GetReplies() >>> api.GetUserTimeline(user) >>> api.GetHomeTimeLine() >>> api.GetStatus(id) >>> api.DestroyStatus(id) >>> api.GetFriendsTimeline(user) >>> api.GetFriends(user) >>> api.GetFollowers() >>> api.GetFeatured() >>> api.GetDirectMessages() >>> api.GetSentDirectMessages() >>> api.PostDirectMessage(user, text) >>> api.DestroyDirectMessage(id) >>> api.DestroyFriendship(user) >>> api.CreateFriendship(user) >>> api.GetUserByEmail(email) >>> api.VerifyCredentials() ''' DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute _API_REALM = 'Twitter API' def __init__(self, consumer_key=None, consumer_secret=None, access_token_key=None, access_token_secret=None, input_encoding=None, request_headers=None, cache=DEFAULT_CACHE, shortner=None, base_url=None, use_gzip_compression=False, debugHTTP=False): '''Instantiate a new twitter.Api object. Args: consumer_key: Your Twitter user's consumer_key. consumer_secret: Your Twitter user's consumer_secret. access_token_key: The oAuth access token key value you retrieved from running get_access_token.py. access_token_secret: The oAuth access token's secret, also retrieved from the get_access_token.py run. input_encoding: The encoding used to encode input strings. [Optional] request_header: A dictionary of additional HTTP request headers. [Optional] cache: The cache instance to use. Defaults to DEFAULT_CACHE. Use None to disable caching. [Optional] shortner: The shortner instance to use. Defaults to None. See shorten_url.py for an example shortner. [Optional] base_url: The base URL to use to contact the Twitter API. Defaults to https://api.twitter.com. [Optional] use_gzip_compression: Set to True to tell enable gzip compression for any call made to Twitter. Defaults to False. [Optional] debugHTTP: Set to True to enable debug output from urllib2 when performing any HTTP requests. Defaults to False. 
[Optional] ''' self.SetCache(cache) self._urllib = urllib2 self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT self._input_encoding = input_encoding self._use_gzip = use_gzip_compression self._debugHTTP = debugHTTP self._oauth_consumer = None self._shortlink_size = 19 self._InitializeRequestHeaders(request_headers) self._InitializeUserAgent() self._InitializeDefaultParameters() if base_url is None: self.base_url = 'https://api.twitter.com/1.1' else: self.base_url = base_url if consumer_key is not None and (access_token_key is None or access_token_secret is None): print >> sys.stderr, 'Twitter now requires an oAuth Access Token for API calls.' print >> sys.stderr, 'If your using this library from a command line utility, please' print >> sys.stderr, 'run the the included get_access_token.py tool to generate one.' raise TwitterError('Twitter requires oAuth Access Token for all API access') self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret) def SetCredentials(self, consumer_key, consumer_secret, access_token_key=None, access_token_secret=None): '''Set the consumer_key and consumer_secret for this instance Args: consumer_key: The consumer_key of the twitter account. consumer_secret: The consumer_secret for the twitter account. access_token_key: The oAuth access token key value you retrieved from running get_access_token.py. access_token_secret: The oAuth access token's secret, also retrieved from the get_access_token.py run. 
''' self._consumer_key = consumer_key self._consumer_secret = consumer_secret self._access_token_key = access_token_key self._access_token_secret = access_token_secret self._oauth_consumer = None if consumer_key is not None and consumer_secret is not None and \ access_token_key is not None and access_token_secret is not None: self._signature_method_plaintext = oauth.SignatureMethod_PLAINTEXT() self._signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() self._oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret) self._oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret) def ClearCredentials(self): '''Clear the any credentials for this instance ''' self._consumer_key = None self._consumer_secret = None self._access_token_key = None self._access_token_secret = None self._oauth_consumer = None def GetSearch(self, term=None, geocode=None, since_id=None, max_id=None, until=None, count=15, lang=None, locale=None, result_type="mixed", include_entities=None): '''Return twitter search results for a given term. Args: term: Term to search by. Optional if you include geocode. since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available. [Optional] max_id: Returns only statuses with an ID less than (that is, older than) or equal to the specified ID. [Optional] until: Returns tweets generated before the given date. Date should be formatted as YYYY-MM-DD. [Optional] geocode: Geolocation information in the form (latitude, longitude, radius) [Optional] count: Number of results to return. Default is 15 [Optional] lang: Language for results as ISO 639-1 code. Default is None (all languages) [Optional] locale: Language of the search query. Currently only 'ja' is effective. 
This is intended for language-specific consumers and the default should work in the majority of cases. result_type: Type of result which should be returned. Default is "mixed". Other valid options are "recent" and "popular". [Optional] include_entities: If True, each tweet will include a node called "entities,". This node offers a variety of metadata about the tweet in a discrete structure, including: user_mentions, urls, and hashtags. [Optional] Returns: A sequence of twitter.Status instances, one for each message containing the term ''' # Build request parameters parameters = {} if since_id: try: parameters['since_id'] = long(since_id) except: raise TwitterError("since_id must be an integer") if max_id: try: parameters['max_id'] = long(max_id) except: raise TwitterError("max_id must be an integer") if until: parameters['until'] = until if lang: parameters['lang'] = lang if locale: parameters['locale'] = locale if term is None and geocode is None: return [] if term is not None: parameters['q'] = term if geocode is not None: parameters['geocode'] = ','.join(map(str, geocode)) if include_entities: parameters['include_entities'] = 1 try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if result_type in ["mixed", "popular", "recent"]: parameters['result_type'] = result_type # Make and send requests url = '%s/search/tweets.json' % self.base_url json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) # Return built list of statuses return [Status.NewFromJsonDict(x) for x in data['statuses']] def GetUsersSearch(self, term=None, page=1, count=20, include_entities=None): '''Return twitter user search results for a given term. Args: term: Term to search by. page: Page of results to return. Default is 1 [Optional] count: Number of results to return. Default is 20 [Optional] include_entities: If True, each tweet will include a node called "entities,". 
This node offers a variety of metadata about the tweet in a discrete structure, including: user_mentions, urls, and hashtags. [Optional] Returns: A sequence of twitter.User instances, one for each message containing the term ''' # Build request parameters parameters = {} if term is not None: parameters['q'] = term if include_entities: parameters['include_entities'] = 1 try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") # Make and send requests url = '%s/users/search.json' % self.base_url json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [User.NewFromJsonDict(x) for x in data] def GetTrendsCurrent(self, exclude=None): '''Get the current top trending topics (global) Args: exclude: Appends the exclude parameter as a request parameter. Currently only exclude=hashtags is supported. [Optional] Returns: A list with 10 entries. Each entry contains a trend. ''' return self.GetTrendsWoeid(id=1, exclude=exclude) def GetTrendsWoeid(self, id, exclude=None): '''Return the top 10 trending topics for a specific WOEID, if trending information is available for it. Args: woeid: the Yahoo! Where On Earth ID for a location. exclude: Appends the exclude parameter as a request parameter. Currently only exclude=hashtags is supported. [Optional] Returns: A list with 10 entries. Each entry contains a trend. 
    '''
    url = '%s/trends/place.json' % (self.base_url)
    parameters = {'id': id}

    if exclude:
      parameters['exclude'] = exclude

    json = self._FetchUrl(url, parameters=parameters)
    data = self._ParseAndCheckTwitter(json)

    trends = []
    # The endpoint returns a one-element list; its 'as_of' field stamps the
    # whole batch, so every Trend built below shares the same timestamp.
    timestamp = data[0]['as_of']

    for trend in data[0]['trends']:
      trends.append(Trend.NewFromJsonDict(trend, timestamp = timestamp))
    return trends

  def GetHomeTimeline(self,
                      count=None,
                      since_id=None,
                      max_id=None,
                      trim_user=False,
                      exclude_replies=False,
                      contributor_details=False,
                      include_entities=True):
    '''
    Fetch a collection of the most recent Tweets and retweets posted by the
    authenticating user and the users they follow.

    The home timeline is central to how most users interact with the Twitter
    service.

    The twitter.Api instance must be authenticated.

    Args:
      count:
        Specifies the number of statuses to retrieve. May not be
        greater than 200. Defaults to 20. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available. [Optional]
      max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID. [Optional]
      trim_user:
        When True, each tweet returned in a timeline will include a user
        object including only the status authors numerical ID. Omit this
        parameter to receive the complete user object. [Optional]
      exclude_replies:
        This parameter will prevent replies from appearing in the
        returned timeline. Using exclude_replies with the count
        parameter will mean you will receive up-to count tweets -
        this is because the count parameter retrieves that many
        tweets before filtering out retweets and replies. [Optional]
      contributor_details:
        This parameter enhances the contributors element of the
        status response to include the screen_name of the contributor.
        By default only the user_id of the contributor is included.
[Optional] include_entities: The entities node will be disincluded when set to false. This node offers a variety of metadata about the tweet in a discreet structure, including: user_mentions, urls, and hashtags. [Optional] Returns: A sequence of twitter.Status instances, one for each message ''' url = '%s/statuses/home_timeline.json' % self.base_url if not self._oauth_consumer: raise TwitterError("API must be authenticated.") parameters = {} if count is not None: try: if int(count) > 200: raise TwitterError("'count' may not be greater than 200") except ValueError: raise TwitterError("'count' must be an integer") parameters['count'] = count if since_id: try: parameters['since_id'] = long(since_id) except ValueError: raise TwitterError("'since_id' must be an integer") if max_id: try: parameters['max_id'] = long(max_id) except ValueError: raise TwitterError("'max_id' must be an integer") if trim_user: parameters['trim_user'] = 1 if exclude_replies: parameters['exclude_replies'] = 1 if contributor_details: parameters['contributor_details'] = 1 if not include_entities: parameters['include_entities'] = 'false' json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [Status.NewFromJsonDict(x) for x in data] def GetUserTimeline(self, user_id=None, screen_name=None, since_id=None, max_id=None, count=None, include_rts=None, trim_user=None, exclude_replies=None): '''Fetch the sequence of public Status messages for a single user. The twitter.Api instance must be authenticated if the user is private. Args: user_id: Specifies the ID of the user for whom to return the user_timeline. Helpful for disambiguating when a valid user ID is also a valid screen name. [Optional] screen_name: Specifies the screen name of the user for whom to return the user_timeline. Helpful for disambiguating when a valid screen name is also a user ID. [Optional] since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. 
There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available. [Optional] max_id: Returns only statuses with an ID less than (that is, older than) or equal to the specified ID. [Optional] count: Specifies the number of statuses to retrieve. May not be greater than 200. [Optional] include_rts: If True, the timeline will contain native retweets (if they exist) in addition to the standard stream of tweets. [Optional] trim_user: If True, statuses will only contain the numerical user ID only. Otherwise a full user object will be returned for each status. [Optional] exclude_replies: If True, this will prevent replies from appearing in the returned timeline. Using exclude_replies with the count parameter will mean you will receive up-to count tweets - this is because the count parameter retrieves that many tweets before filtering out retweets and replies. This parameter is only supported for JSON and XML responses. 
[Optional] Returns: A sequence of Status instances, one for each message up to count ''' parameters = {} url = '%s/statuses/user_timeline.json' % (self.base_url) if user_id: parameters['user_id'] = user_id elif screen_name: parameters['screen_name'] = screen_name if since_id: try: parameters['since_id'] = long(since_id) except: raise TwitterError("since_id must be an integer") if max_id: try: parameters['max_id'] = long(max_id) except: raise TwitterError("max_id must be an integer") if count: try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if include_rts: parameters['include_rts'] = 1 if trim_user: parameters['trim_user'] = 1 if exclude_replies: parameters['exclude_replies'] = 1 json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [Status.NewFromJsonDict(x) for x in data] def GetStatus(self, id, trim_user=False, include_my_retweet=True, include_entities=True): '''Returns a single status message, specified by the id parameter. The twitter.Api instance must be authenticated. Args: id: The numeric ID of the status you are trying to retrieve. trim_user: When set to True, each tweet returned in a timeline will include a user object including only the status authors numerical ID. Omit this parameter to receive the complete user object. [Optional] include_my_retweet: When set to True, any Tweets returned that have been retweeted by the authenticating user will include an additional current_user_retweet node, containing the ID of the source status for the retweet. [Optional] include_entities: If False, the entities node will be disincluded. This node offers a variety of metadata about the tweet in a discreet structure, including: user_mentions, urls, and hashtags. 
[Optional] Returns: A twitter.Status instance representing that status message ''' url = '%s/statuses/show.json' % (self.base_url) if not self._oauth_consumer: raise TwitterError("API must be authenticated.") parameters = {} try: parameters['id'] = long(id) except ValueError: raise TwitterError("'id' must be an integer.") if trim_user: parameters['trim_user'] = 1 if include_my_retweet: parameters['include_my_retweet'] = 1 if not include_entities: parameters['include_entities'] = 'none' json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return Status.NewFromJsonDict(data) def DestroyStatus(self, id, trim_user=False): '''Destroys the status specified by the required ID parameter. The twitter.Api instance must be authenticated and the authenticating user must be the author of the specified status. Args: id: The numerical ID of the status you're trying to destroy. Returns: A twitter.Status instance representing the destroyed status message ''' if not self._oauth_consumer: raise TwitterError("API must be authenticated.") try: post_data = {'id': long(id)} except: raise TwitterError("id must be an integer") url = '%s/statuses/destroy/%s.json' % (self.base_url, id) if trim_user: post_data['trim_user'] = 1 json = self._FetchUrl(url, post_data=post_data) data = self._ParseAndCheckTwitter(json) return Status.NewFromJsonDict(data) @classmethod def _calculate_status_length(cls, status, linksize=19): dummy_link_replacement = 'https://-%d-chars%s/' % (linksize, '-'*(linksize - 18)) shortened = ' '.join([x if not (x.startswith('http://') or x.startswith('https://')) else dummy_link_replacement for x in status.split(' ')]) return len(shortened) def PostUpdate(self, status, in_reply_to_status_id=None, latitude=None, longitude=None, place_id=None, display_coordinates=False, trim_user=False): '''Post a twitter status message from the authenticated user. The twitter.Api instance must be authenticated. 
https://dev.twitter.com/docs/api/1.1/post/statuses/update Args: status: The message text to be posted. Must be less than or equal to 140 characters. in_reply_to_status_id: The ID of an existing status that the status to be posted is in reply to. This implicitly sets the in_reply_to_user_id attribute of the resulting status to the user ID of the message being replied to. Invalid/missing status IDs will be ignored. [Optional] latitude: Latitude coordinate of the tweet in degrees. Will only work in conjunction with longitude argument. Both longitude and latitude will be ignored by twitter if the user has a false geo_enabled setting. [Optional] longitude: Longitude coordinate of the tweet in degrees. Will only work in conjunction with latitude argument. Both longitude and latitude will be ignored by twitter if the user has a false geo_enabled setting. [Optional] place_id: A place in the world. These IDs can be retrieved from GET geo/reverse_geocode. [Optional] display_coordinates: Whether or not to put a pin on the exact coordinates a tweet has been sent from. [Optional] trim_user: If True the returned payload will only contain the user IDs, otherwise the payload will contain the full user data item. [Optional] Returns: A twitter.Status instance representing the message posted. ''' if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") url = '%s/statuses/update.json' % self.base_url if isinstance(status, unicode) or self._input_encoding is None: u_status = status else: u_status = unicode(status, self._input_encoding) #if self._calculate_status_length(u_status, self._shortlink_size) > CHARACTER_LIMIT: # raise TwitterError("Text must be less than or equal to %d characters. " # "Consider using PostUpdates." 
% CHARACTER_LIMIT) data = {'status': status} if in_reply_to_status_id: data['in_reply_to_status_id'] = in_reply_to_status_id if latitude is not None and longitude is not None: data['lat'] = str(latitude) data['long'] = str(longitude) if place_id is not None: data['place_id'] = str(place_id) if display_coordinates: data['display_coordinates'] = 'true' if trim_user: data['trim_user'] = 'true' json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return Status.NewFromJsonDict(data) def PostUpdates(self, status, continuation=None, **kwargs): '''Post one or more twitter status messages from the authenticated user. Unlike api.PostUpdate, this method will post multiple status updates if the message is longer than 140 characters. The twitter.Api instance must be authenticated. Args: status: The message text to be posted. May be longer than 140 characters. continuation: The character string, if any, to be appended to all but the last message. Note that Twitter strips trailing '...' strings from messages. Consider using the unicode \u2026 character (horizontal ellipsis) instead. [Defaults to None] **kwargs: See api.PostUpdate for a list of accepted parameters. Returns: A of list twitter.Status instance representing the messages posted. ''' results = list() if continuation is None: continuation = '' line_length = CHARACTER_LIMIT - len(continuation) lines = textwrap.wrap(status, line_length) for line in lines[0:-1]: results.append(self.PostUpdate(line + continuation, **kwargs)) results.append(self.PostUpdate(lines[-1], **kwargs)) return results def PostRetweet(self, original_id, trim_user=False): '''Retweet a tweet with the Retweet API. The twitter.Api instance must be authenticated. Args: original_id: The numerical id of the tweet that will be retweeted trim_user: If True the returned payload will only contain the user IDs, otherwise the payload will contain the full user data item. 
        [Optional]

    Returns:
      A twitter.Status instance representing the original tweet with retweet details embedded.
    '''
    if not self._oauth_consumer:
      raise TwitterError("The twitter.Api instance must be authenticated.")

    # Validate before hitting the network: the id must parse as a positive
    # integer.  Note int(original_id) may raise ValueError, which maps to
    # the second error message below.
    try:
      if int(original_id) <= 0:
        raise TwitterError("'original_id' must be a positive number")
    except ValueError:
      raise TwitterError("'original_id' must be an integer")

    url = '%s/statuses/retweet/%s.json' % (self.base_url, original_id)

    data = {'id': original_id}
    if trim_user:
      data['trim_user'] = 'true'
    json = self._FetchUrl(url, post_data=data)
    data = self._ParseAndCheckTwitter(json)
    return Status.NewFromJsonDict(data)

  def GetUserRetweets(self, count=None, since_id=None, max_id=None, trim_user=False):
    '''Fetch the sequence of retweets made by the authenticated user.

    The twitter.Api instance must be authenticated.

    Args:
      count:
        The number of status messages to retrieve. [Optional]
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of
        Tweets has occurred since the since_id, the since_id will be
        forced to the oldest ID available. [Optional]
      max_id:
        Returns results with an ID less than (that is, older than) or
        equal to the specified ID. [Optional]
      trim_user:
        If True the returned payload will only contain the user IDs,
        otherwise the payload will contain the full user data item.
        [Optional]

    Returns:
      A sequence of twitter.Status instances, one for each message up to count
    '''
    # Delegates to GetUserTimeline: the user's retweets are their own
    # timeline restricted to native retweets (include_rts, exclude_replies).
    return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user, exclude_replies=True, include_rts=True)

  def GetReplies(self, since_id=None, count=None, max_id=None, trim_user=False):
    '''Get a sequence of status messages representing the 20 most
    recent replies (status updates prefixed with @twitterID) to the
    authenticating user.

    Args:
      since_id:
        Returns results with an ID greater than (that is, more recent
        than) the specified ID.
There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available. [Optional] max_id: Returns results with an ID less than (that is, older than) or equal to the specified ID. [Optional] trim_user: If True the returned payload will only contain the user IDs, otherwise the payload will contain the full user data item. [Optional] Returns: A sequence of twitter.Status instances, one for each reply to the user. ''' return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user, exclude_replies=False, include_rts=False) def GetRetweets(self, statusid, count=None, trim_user=False): '''Returns up to 100 of the first retweets of the tweet identified by statusid Args: statusid: The ID of the tweet for which retweets should be searched for count: The number of status messages to retrieve. [Optional] trim_user: If True the returned payload will only contain the user IDs, otherwise the payload will contain the full user data item. [Optional] Returns: A list of twitter.Status instances, which are retweets of statusid ''' if not self._oauth_consumer: raise TwitterError("The twitter.Api instsance must be authenticated.") url = '%s/statuses/retweets/%s.json' % (self.base_url, statusid) parameters = {} if trim_user: parameters['trim_user'] = 'true' if count: try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [Status.NewFromJsonDict(s) for s in data] def GetRetweetsOfMe(self, count=None, since_id=None, max_id=None, trim_user=False, include_entities=True, include_user_entities=True): '''Returns up to 100 of the most recent tweets of the user that have been retweeted by others. Args: count: The number of retweets to retrieve, up to 100. If omitted, 20 is assumed. 
since_id: Returns results with an ID greater than (newer than) this ID. max_id: Returns results with an ID less than or equal to this ID. trim_user: When True, the user object for each tweet will only be an ID. include_entities: When True, the tweet entities will be included. include_user_entities: When True, the user entities will be included. ''' if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") url = '%s/statuses/retweets_of_me.json' % self.base_url parameters = {} if count is not None: try: if int(count) > 100: raise TwitterError("'count' may not be greater than 100") except ValueError: raise TwitterError("'count' must be an integer") if count: parameters['count'] = count if since_id: parameters['since_id'] = since_id if max_id: parameters['max_id'] = max_id if trim_user: parameters['trim_user'] = trim_user if not include_entities: parameters['include_entities'] = include_entities if not include_user_entities: parameters['include_user_entities'] = include_user_entities json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [Status.NewFromJsonDict(s) for s in data] def GetFriends(self, user_id=None, screen_name=None, cursor=-1, skip_status=False, include_user_entities=False): '''Fetch the sequence of twitter.User instances, one for each friend. The twitter.Api instance must be authenticated. Args: user_id: The twitter id of the user whose friends you are fetching. If not specified, defaults to the authenticated user. [Optional] screen_name: The twitter name of the user whose friends you are fetching. If not specified, defaults to the authenticated user. [Optional] cursor: Should be set to -1 for the initial call and then is used to control what result page Twitter returns [Optional(ish)] skip_status: If True the statuses will not be returned in the user items. [Optional] include_user_entities: When True, the user entities will be included. 
    Returns:
      A sequence of twitter.User instances, one for each friend
    '''
    if not self._oauth_consumer:
      raise TwitterError("twitter.Api instance must be authenticated")

    url = '%s/friends/list.json' % self.base_url
    result = []
    parameters = {}
    if user_id is not None:
      parameters['user_id'] = user_id
    if screen_name is not None:
      parameters['screen_name'] = screen_name
    if skip_status:
      parameters['skip_status'] = True
    if include_user_entities:
      parameters['include_user_entities'] = True

    # Follow Twitter's cursor pagination until the server signals the end
    # (next_cursor of 0, or a cursor that no longer advances).
    while True:
      parameters['cursor'] = cursor
      json = self._FetchUrl(url, parameters=parameters)
      data = self._ParseAndCheckTwitter(json)
      result += [User.NewFromJsonDict(x) for x in data['users']]
      if 'next_cursor' in data:
        if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
          break
        else:
          cursor = data['next_cursor']
      else:
        break
    return result

  def GetFriendIDs(self, user_id=None, screen_name=None, cursor=-1, stringify_ids=False, count=None):
    '''Returns a list of twitter user id's for every person
    the specified user is following.

    Args:
      user_id:
        The id of the user to retrieve the id list for
        [Optional]
      screen_name:
        The screen_name of the user to retrieve the id list for
        [Optional]
      cursor:
        Specifies the Twitter API Cursor location to start at.
        Note: there are pagination limits.
        [Optional]
      stringify_ids:
        if True then twitter will return the ids as strings instead of integers.
        [Optional]
      count:
        The number of status messages to retrieve. [Optional]

    Returns:
      A list of integers, one for each user id.
    '''
    url = '%s/friends/ids.json' % self.base_url
    if not self._oauth_consumer:
      raise TwitterError("twitter.Api instance must be authenticated")
    parameters = {}
    if user_id is not None:
      parameters['user_id'] = user_id
    if screen_name is not None:
      parameters['screen_name'] = screen_name
    if stringify_ids:
      parameters['stringify_ids'] = True
    if count is not None:
      parameters['count'] = count
    result = []

    # Follow Twitter's cursor pagination until the server signals the end
    # (next_cursor of 0, or a cursor that no longer advances).
    while True:
      parameters['cursor'] = cursor
      json = self._FetchUrl(url, parameters=parameters)
      data = self._ParseAndCheckTwitter(json)
      result += [x for x in data['ids']]
      if 'next_cursor' in data:
        if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
          break
        else:
          cursor = data['next_cursor']
      else:
        break
    return result

  def GetFollowerIDs(self, user_id=None, screen_name=None, cursor=-1, stringify_ids=False, count=None, total_count=None):
    '''Returns a list of twitter user id's for every person
    that is following the specified user.

    Args:
      user_id:
        The id of the user to retrieve the id list for
        [Optional]
      screen_name:
        The screen_name of the user to retrieve the id list for
        [Optional]
      cursor:
        Specifies the Twitter API Cursor location to start at.
        Note: there are pagination limits.
        [Optional]
      stringify_ids:
        if True then twitter will return the ids as strings instead of integers.
        [Optional]
      count:
        The number of user id's to retrieve per API request. Please be aware that
        this might get you rate-limited if set to a small number.
        By default Twitter will retrieve 5000 UIDs per call.
        [Optional]
      total_count:
        The total amount of UIDs to retrieve. Good if the account has many followers
        and you don't want to get rate limited. The data returned might contain more
        UIDs if total_count is not a multiple of count (5000 by default).
        [Optional]

    Returns:
      A list of integers, one for each user id.
''' url = '%s/followers/ids.json' % self.base_url if not self._oauth_consumer: raise TwitterError("twitter.Api instance must be authenticated") parameters = {} if user_id is not None: parameters['user_id'] = user_id if screen_name is not None: parameters['screen_name'] = screen_name if stringify_ids: parameters['stringify_ids'] = True if count is not None: parameters['count'] = count result = [] while True: if total_count and total_count < count: parameters['count'] = total_count parameters['cursor'] = cursor json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) result += [x for x in data['ids']] if 'next_cursor' in data: if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']: break else: cursor = data['next_cursor'] total_count -= len(data['ids']) if total_count < 1: break else: break return result def GetFollowers(self, user_id=None, screen_name=None, cursor=-1, skip_status=False, include_user_entities=False): '''Fetch the sequence of twitter.User instances, one for each follower The twitter.Api instance must be authenticated. Args: user_id: The twitter id of the user whose followers you are fetching. If not specified, defaults to the authenticated user. [Optional] screen_name: The twitter name of the user whose followers you are fetching. If not specified, defaults to the authenticated user. [Optional] cursor: Should be set to -1 for the initial call and then is used to control what result page Twitter returns [Optional(ish)] skip_status: If True the statuses will not be returned in the user items. [Optional] include_user_entities: When True, the user entities will be included. 
Returns: A sequence of twitter.User instances, one for each follower ''' if not self._oauth_consumer: raise TwitterError("twitter.Api instance must be authenticated") url = '%s/followers/list.json' % self.base_url result = [] parameters = {} if user_id is not None: parameters['user_id'] = user_id if screen_name is not None: parameters['screen_name'] = screen_name if skip_status: parameters['skip_status'] = True if include_user_entities: parameters['include_user_entities'] = True while True: parameters['cursor'] = cursor json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) result += [User.NewFromJsonDict(x) for x in data['users']] if 'next_cursor' in data: if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']: break else: cursor = data['next_cursor'] else: break return result def UsersLookup(self, user_id=None, screen_name=None, users=None, include_entities=True): '''Fetch extended information for the specified users. Users may be specified either as lists of either user_ids, screen_names, or twitter.User objects. The list of users that are queried is the union of all specified parameters. The twitter.Api instance must be authenticated. Args: user_id: A list of user_ids to retrieve extended information. [Optional] screen_name: A list of screen_names to retrieve extended information. [Optional] users: A list of twitter.User objects to retrieve extended information. [Optional] include_entities: The entities node that may appear within embedded statuses will be disincluded when set to False. 
[Optional] Returns: A list of twitter.User objects for the requested users ''' if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") if not user_id and not screen_name and not users: raise TwitterError("Specify at least one of user_id, screen_name, or users.") url = '%s/users/lookup.json' % self.base_url parameters = {} uids = list() if user_id: uids.extend(user_id) if users: uids.extend([u.id for u in users]) if len(uids): parameters['user_id'] = ','.join(["%s" % u for u in uids]) if screen_name: parameters['screen_name'] = ','.join(screen_name) if not include_entities: parameters['include_entities'] = 'false' json = self._FetchUrl(url, parameters=parameters) try: data = self._ParseAndCheckTwitter(json) except TwitterError, e: _, e, _ = sys.exc_info() t = e.args[0] if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34): data = [] else: raise return [User.NewFromJsonDict(u) for u in data] def GetUser(self, user_id=None, screen_name=None, include_entities=True): '''Returns a single user. The twitter.Api instance must be authenticated. Args: user_id: The id of the user to retrieve. [Optional] screen_name: The screen name of the user for whom to return results for. Either a user_id or screen_name is required for this method. [Optional] include_entities: if set to False, the 'entities' node will not be included. 
[Optional] Returns: A twitter.User instance representing that user ''' url = '%s/users/show.json' % (self.base_url) parameters = {} if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") if user_id: parameters['user_id'] = user_id elif screen_name: parameters['screen_name'] = screen_name else: raise TwitterError("Specify at least one of user_id or screen_name.") if not include_entities: parameters['include_entities'] = 'false' json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return User.NewFromJsonDict(data) def GetDirectMessages(self, since_id=None, max_id=None, count=None, include_entities=True, skip_status=False): '''Returns a list of the direct messages sent to the authenticating user. The twitter.Api instance must be authenticated. Args: since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available. [Optional] max_id: Returns results with an ID less than (that is, older than) or equal to the specified ID. [Optional] count: Specifies the number of direct messages to try and retrieve, up to a maximum of 200. The value of count is best thought of as a limit to the number of Tweets to return because suspended or deleted content is removed after the count has been applied. [Optional] include_entities: The entities node will not be included when set to False. [Optional] skip_status: When set to True statuses will not be included in the returned user objects. 
[Optional] Returns: A sequence of twitter.DirectMessage instances ''' url = '%s/direct_messages.json' % self.base_url if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") parameters = {} if since_id: parameters['since_id'] = since_id if max_id: parameters['max_id'] = max_id if count: try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if not include_entities: parameters['include_entities'] = 'false' if skip_status: parameters['skip_status'] = 1 json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [DirectMessage.NewFromJsonDict(x) for x in data] def GetSentDirectMessages(self, since_id=None, max_id=None, count=None, page=None, include_entities=True): '''Returns a list of the direct messages sent by the authenticating user. The twitter.Api instance must be authenticated. Args: since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occured since the since_id, the since_id will be forced to the oldest ID available. [Optional] max_id: Returns results with an ID less than (that is, older than) or equal to the specified ID. [Optional] count: Specifies the number of direct messages to try and retrieve, up to a maximum of 200. The value of count is best thought of as a limit to the number of Tweets to return because suspended or deleted content is removed after the count has been applied. [Optional] page: Specifies the page of results to retrieve. Note: there are pagination limits. [Optional] include_entities: The entities node will not be included when set to False. 
[Optional] Returns: A sequence of twitter.DirectMessage instances ''' url = '%s/direct_messages/sent.json' % self.base_url if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") parameters = {} if since_id: parameters['since_id'] = since_id if page: parameters['page'] = page if max_id: parameters['max_id'] = max_id if count: try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if not include_entities: parameters['include_entities'] = 'false' json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [DirectMessage.NewFromJsonDict(x) for x in data] def PostDirectMessage(self, text, user_id=None, screen_name=None): '''Post a twitter direct message from the authenticated user The twitter.Api instance must be authenticated. user_id or screen_name must be specified. Args: text: The message text to be posted. Must be less than 140 characters. user_id: The ID of the user who should receive the direct message. [Optional] screen_name: The screen name of the user who should receive the direct message. [Optional] Returns: A twitter.DirectMessage instance representing the message posted ''' if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") url = '%s/direct_messages/new.json' % self.base_url data = {'text': text} if user_id: data['user_id'] = user_id elif screen_name: data['screen_name'] = screen_name else: raise TwitterError("Specify at least one of user_id or screen_name.") json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return DirectMessage.NewFromJsonDict(data) def DestroyDirectMessage(self, id, include_entities=True): '''Destroys the direct message specified in the required ID parameter. The twitter.Api instance must be authenticated, and the authenticating user must be the recipient of the specified direct message. 
Args: id: The id of the direct message to be destroyed Returns: A twitter.DirectMessage instance representing the message destroyed ''' url = '%s/direct_messages/destroy.json' % self.base_url data = {'id': id} if not include_entities: data['include_entities'] = 'false' json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return DirectMessage.NewFromJsonDict(data) def CreateFriendship(self, user_id=None, screen_name=None, follow=True): '''Befriends the user specified by the user_id or screen_name. The twitter.Api instance must be authenticated. Args: user_id: A user_id to follow [Optional] screen_name: A screen_name to follow [Optional] follow: Set to False to disable notifications for the target user Returns: A twitter.User instance representing the befriended user. ''' url = '%s/friendships/create.json' % (self.base_url) data = {} if user_id: data['user_id'] = user_id elif screen_name: data['screen_name'] = screen_name else: raise TwitterError("Specify at least one of user_id or screen_name.") if follow: data['follow'] = 'true' else: data['follow'] = 'false' json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return User.NewFromJsonDict(data) def DestroyFriendship(self, user_id=None, screen_name=None): '''Discontinues friendship with a user_id or screen_name. The twitter.Api instance must be authenticated. Args: user_id: A user_id to unfollow [Optional] screen_name: A screen_name to unfollow [Optional] Returns: A twitter.User instance representing the discontinued friend. 
''' url = '%s/friendships/destroy.json' % self.base_url data = {} if user_id: data['user_id'] = user_id elif screen_name: data['screen_name'] = screen_name else: raise TwitterError("Specify at least one of user_id or screen_name.") json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return User.NewFromJsonDict(data) def CreateFavorite(self, status=None, id=None, include_entities=True): '''Favorites the specified status object or id as the authenticating user. Returns the favorite status when successful. The twitter.Api instance must be authenticated. Args: id: The id of the twitter status to mark as a favorite. [Optional] status: The twitter.Status object to mark as a favorite. [Optional] include_entities: The entities node will be omitted when set to False. Returns: A twitter.Status instance representing the newly-marked favorite. ''' url = '%s/favorites/create.json' % self.base_url data = {} if id: data['id'] = id elif status: data['id'] = status.id else: raise TwitterError("Specify id or status") if not include_entities: data['include_entities'] = 'false' json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return Status.NewFromJsonDict(data) def DestroyFavorite(self, status=None, id=None, include_entities=True): '''Un-Favorites the specified status object or id as the authenticating user. Returns the un-favorited status when successful. The twitter.Api instance must be authenticated. Args: id: The id of the twitter status to unmark as a favorite. [Optional] status: The twitter.Status object to unmark as a favorite. [Optional] include_entities: The entities node will be omitted when set to False. Returns: A twitter.Status instance representing the newly-unmarked favorite. 
''' url = '%s/favorites/destroy.json' % self.base_url data = {} if id: data['id'] = id elif status: data['id'] = status.id else: raise TwitterError("Specify id or status") if not include_entities: data['include_entities'] = 'false' json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return Status.NewFromJsonDict(data) def GetFavorites(self, user_id=None, screen_name=None, count=None, since_id=None, max_id=None, include_entities=True): '''Return a list of Status objects representing favorited tweets. By default, returns the (up to) 20 most recent tweets for the authenticated user. Args: user: The twitter name or id of the user whose favorites you are fetching. If not specified, defaults to the authenticated user. [Optional] page: Specifies the page of results to retrieve. Note: there are pagination limits. [Optional] ''' parameters = {} url = '%s/favorites/list.json' % self.base_url if user_id: parameters['user_id'] = user_id elif screen_name: parameters['screen_name'] = user_id if since_id: try: parameters['since_id'] = long(since_id) except: raise TwitterError("since_id must be an integer") if max_id: try: parameters['max_id'] = long(max_id) except: raise TwitterError("max_id must be an integer") if count: try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if include_entities: parameters['include_entities'] = True json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [Status.NewFromJsonDict(x) for x in data] def GetMentions(self, count=None, since_id=None, max_id=None, trim_user=False, contributor_details=False, include_entities=True): '''Returns the 20 most recent mentions (status containing @screen_name) for the authenticating user. Args: count: Specifies the number of tweets to try and retrieve, up to a maximum of 200. 
The value of count is best thought of as a limit to the number of tweets to return because suspended or deleted content is removed after the count has been applied. [Optional] since_id: Returns results with an ID greater than (that is, more recent than) the specified ID. There are limits to the number of Tweets which can be accessed through the API. If the limit of Tweets has occurred since the since_id, the since_id will be forced to the oldest ID available. [Optional] max_id: Returns only statuses with an ID less than (that is, older than) the specified ID. [Optional] trim_user: When set to True, each tweet returned in a timeline will include a user object including only the status authors numerical ID. Omit this parameter to receive the complete user object. contributor_details: If set to True, this parameter enhances the contributors element of the status response to include the screen_name of the contributor. By default only the user_id of the contributor is included. include_entities: The entities node will be disincluded when set to False. Returns: A sequence of twitter.Status instances, one for each mention of the user. 
''' url = '%s/statuses/mentions_timeline.json' % self.base_url if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") parameters = {} if count: try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if since_id: try: parameters['since_id'] = long(since_id) except: raise TwitterError("since_id must be an integer") if max_id: try: parameters['max_id'] = long(max_id) except: raise TwitterError("max_id must be an integer") if trim_user: parameters['trim_user'] = 1 if contributor_details: parameters['contributor_details'] = 'true' if not include_entities: parameters['include_entities'] = 'false' json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [Status.NewFromJsonDict(x) for x in data] def CreateList(self, name, mode=None, description=None): '''Creates a new list with the give name for the authenticated user. The twitter.Api instance must be authenticated. Args: name: New name for the list mode: 'public' or 'private'. Defaults to 'public'. [Optional] description: Description of the list. [Optional] Returns: A twitter.List instance representing the new list ''' url = '%s/lists/create.json' % self.base_url if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") parameters = {'name': name} if mode is not None: parameters['mode'] = mode if description is not None: parameters['description'] = description json = self._FetchUrl(url, post_data=parameters) data = self._ParseAndCheckTwitter(json) return List.NewFromJsonDict(data) def DestroyList(self, owner_screen_name=False, owner_id=False, list_id=None, slug=None): ''' Destroys the list identified by list_id or owner_screen_name/owner_id and slug. The twitter.Api instance must be authenticated. Args: owner_screen_name: The screen_name of the user who owns the list being requested by a slug. 
owner_id: The user ID of the user who owns the list being requested by a slug. list_id: The numerical id of the list. slug: You can identify a list by its slug instead of its numerical id. If you decide to do so, note that you'll also have to specify the list owner using the owner_id or owner_screen_name parameters. Returns: A twitter.List instance representing the removed list. ''' url = '%s/lists/destroy.json' % self.base_url data = {} if list_id: try: data['list_id']= long(list_id) except: raise TwitterError("list_id must be an integer") elif slug: data['slug'] = slug if owner_id: try: data['owner_id'] = long(owner_id) except: raise TwitterError("owner_id must be an integer") elif owner_screen_name: data['owner_screen_name'] = owner_screen_name else: raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug") else: raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug") json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return List.NewFromJsonDict(data) def CreateSubscription(self, owner_screen_name=False, owner_id=False, list_id=None, slug=None): '''Creates a subscription to a list by the authenticated user The twitter.Api instance must be authenticated. Args: owner_screen_name: The screen_name of the user who owns the list being requested by a slug. owner_id: The user ID of the user who owns the list being requested by a slug. list_id: The numerical id of the list. slug: You can identify a list by its slug instead of its numerical id. If you decide to do so, note that you'll also have to specify the list owner using the owner_id or owner_screen_name parameters. 
Returns: A twitter.List instance representing the list subscribed to ''' url = '%s/lists/subscribers/create.json' % (self.base_url) if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") data = {} if list_id: try: data['list_id']= long(list_id) except: raise TwitterError("list_id must be an integer") elif slug: data['slug'] = slug if owner_id: try: data['owner_id'] = long(owner_id) except: raise TwitterError("owner_id must be an integer") elif owner_screen_name: data['owner_screen_name'] = owner_screen_name else: raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug") else: raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug") json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return List.NewFromJsonDict(data) def DestroySubscription(self, owner_screen_name=False, owner_id=False, list_id=None, slug=None): '''Destroys the subscription to a list for the authenticated user The twitter.Api instance must be authenticated. Args: owner_screen_name: The screen_name of the user who owns the list being requested by a slug. owner_id: The user ID of the user who owns the list being requested by a slug. list_id: The numerical id of the list. slug: You can identify a list by its slug instead of its numerical id. If you decide to do so, note that you'll also have to specify the list owner using the owner_id or owner_screen_name parameters. Returns: A twitter.List instance representing the removed list. 
''' url = '%s/lists/subscribers/destroy.json' % (self.base_url) if not self._oauth_consumer: raise TwitterError("The twitter.Api instance must be authenticated.") data = {} if list_id: try: data['list_id']= long(list_id) except: raise TwitterError("list_id must be an integer") elif slug: data['slug'] = slug if owner_id: try: data['owner_id'] = long(owner_id) except: raise TwitterError("owner_id must be an integer") elif owner_screen_name: data['owner_screen_name'] = owner_screen_name else: raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug") else: raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug") json = self._FetchUrl(url, post_data=data) data = self._ParseAndCheckTwitter(json) return List.NewFromJsonDict(data) def GetSubscriptions(self, user_id=None, screen_name=None, count=20, cursor=-1): ''' Obtain a collection of the lists the specified user is subscribed to, 20 lists per page by default. Does not include the user's own lists. The twitter.Api instance must be authenticated. Args: user_id: The ID of the user for whom to return results for. [Optional] screen_name: The screen name of the user for whom to return results for. [Optional] count: The amount of results to return per page. Defaults to 20. No more than 1000 results will ever be returned in a single page. cursor: "page" value that Twitter will use to start building the list sequence from. -1 to start at the beginning. Twitter will return in the result the values for next_cursor and previous_cursor. 
[Optional] Returns: A sequence of twitter.List instances, one for each list ''' if not self._oauth_consumer: raise TwitterError("twitter.Api instance must be authenticated") url = '%s/lists/subscriptions.json' % (self.base_url) parameters = {} try: parameters['cursor'] = int(cursor) except: raise TwitterError("cursor must be an integer") try: parameters['count'] = int(count) except: raise TwitterError("count must be an integer") if user_id is not None: try: parameters['user_id'] = long(user_id) except: raise TwitterError('user_id must be an integer') elif screen_name is not None: parameters['screen_name'] = screen_name else: raise TwitterError('Specify user_id or screen_name') json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) return [List.NewFromJsonDict(x) for x in data['lists']] def GetLists(self, user_id=None, screen_name=None, count=None, cursor=-1): '''Fetch the sequence of lists for a user. The twitter.Api instance must be authenticated. Args: user_id: The ID of the user for whom to return results for. [Optional] screen_name: The screen name of the user for whom to return results for. [Optional] count: The amount of results to return per page. Defaults to 20. No more than 1000 results will ever be returned in a single page. [Optional] cursor: "page" value that Twitter will use to start building the list sequence from. -1 to start at the beginning. Twitter will return in the result the values for next_cursor and previous_cursor. 
[Optional] Returns: A sequence of twitter.List instances, one for each list ''' if not self._oauth_consumer: raise TwitterError("twitter.Api instance must be authenticated") url = '%s/lists/ownerships.json' % self.base_url result = [] parameters = {} if user_id is not None: try: parameters['user_id'] = long(user_id) except: raise TwitterError('user_id must be an integer') elif screen_name is not None: parameters['screen_name'] = screen_name else: raise TwitterError('Specify user_id or screen_name') if count is not None: parameters['count'] = count while True: parameters['cursor'] = cursor json = self._FetchUrl(url, parameters=parameters) data = self._ParseAndCheckTwitter(json) result += [List.NewFromJsonDict(x) for x in data['lists']] if 'next_cursor' in data: if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']: break else: cursor = data['next_cursor'] else: break return result def VerifyCredentials(self): '''Returns a twitter.User instance if the authenticating user is valid. Returns: A twitter.User instance representing that user if the credentials are valid, None otherwise. ''' if not self._oauth_consumer: raise TwitterError("Api instance must first be given user credentials.") url = '%s/account/verify_credentials.json' % self.base_url try: json = self._FetchUrl(url, no_cache=True) except urllib2.HTTPError, http_error: if http_error.code == httplib.UNAUTHORIZED: return None else: raise http_error data = self._ParseAndCheckTwitter(json) return User.NewFromJsonDict(data) def SetCache(self, cache): '''Override the default cache. Set to None to prevent caching. Args: cache: An instance that supports the same API as the twitter._FileCache ''' if cache == DEFAULT_CACHE: self._cache = _FileCache() else: self._cache = cache def SetUrllib(self, urllib): '''Override the default urllib implementation. 
Args: urllib: An instance that supports the same API as the urllib2 module ''' self._urllib = urllib def SetCacheTimeout(self, cache_timeout): '''Override the default cache timeout. Args: cache_timeout: Time, in seconds, that responses should be reused. ''' self._cache_timeout = cache_timeout def SetUserAgent(self, user_agent): '''Override the default user agent Args: user_agent: A string that should be send to the server as the User-agent ''' self._request_headers['User-Agent'] = user_agent def SetXTwitterHeaders(self, client, url, version): '''Set the X-Twitter HTTP headers that will be sent to the server. Args: client: The client name as a string. Will be sent to the server as the 'X-Twitter-Client' header. url: The URL of the meta.xml as a string. Will be sent to the server as the 'X-Twitter-Client-URL' header. version: The client version as a string. Will be sent to the server as the 'X-Twitter-Client-Version' header. ''' self._request_headers['X-Twitter-Client'] = client self._request_headers['X-Twitter-Client-URL'] = url self._request_headers['X-Twitter-Client-Version'] = version def SetSource(self, source): '''Suggest the "from source" value to be displayed on the Twitter web site. The value of the 'source' parameter must be first recognized by the Twitter server. New source values are authorized on a case by case basis by the Twitter development team. Args: source: The source name as a string. Will be sent to the server as the 'source' parameter. ''' self._default_params['source'] = source def GetRateLimitStatus(self, resources=None): '''Fetch the rate limit status for the currently authorized user. Args: resources: A comma seperated list of resource families you want to know the current rate limit disposition of. 
[Optional] Returns: A dictionary containing the time the limit will reset (reset_time), the number of remaining hits allowed before the reset (remaining_hits), the number of hits allowed in a 60-minute period (hourly_limit), and the time of the reset in seconds since The Epoch (reset_time_in_seconds). ''' parameters = {} if resources is not None: parameters['resources'] = resources url = '%s/application/rate_limit_status.json' % self.base_url json = self._FetchUrl(url, parameters=parameters, no_cache=True) data = self._ParseAndCheckTwitter(json) return data def MaximumHitFrequency(self): '''Determines the minimum number of seconds that a program must wait before hitting the server again without exceeding the rate_limit imposed for the currently authenticated user. Returns: The minimum second interval that a program must use so as to not exceed the rate_limit imposed for the user. ''' rate_status = self.GetRateLimitStatus() reset_time = rate_status.get('reset_time', None) limit = rate_status.get('remaining_hits', None) if reset_time: # put the reset time into a datetime object reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7]) # find the difference in time between now and the reset time + 1 hour delta = reset + datetime.timedelta(hours=1) - datetime.datetime.utcnow() if not limit: return int(delta.seconds) # determine the minimum number of seconds allowed as a regular interval max_frequency = int(delta.seconds / limit) + 1 # return the number of seconds return max_frequency return 60 def _BuildUrl(self, url, path_elements=None, extra_params=None): # Break url into constituent parts (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url) # Add any additional path elements to the path if path_elements: # Filter out the path elements that have a value of None p = [i for i in path_elements if i] if not path.endswith('/'): path += '/' path += '/'.join(p) # Add any additional query parameters to the query string if extra_params and 
len(extra_params) > 0: extra_query = self._EncodeParameters(extra_params) # Add it to the existing query if query: query += '&' + extra_query else: query = extra_query # Return the rebuilt URL return urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) def _InitializeRequestHeaders(self, request_headers): if request_headers: self._request_headers = request_headers else: self._request_headers = {} def _InitializeUserAgent(self): user_agent = 'Python-urllib/%s (python-twitter/%s)' % \ (self._urllib.__version__, __version__) self.SetUserAgent(user_agent) def _InitializeDefaultParameters(self): self._default_params = {} def _DecompressGzippedResponse(self, response): raw_data = response.read() if response.headers.get('content-encoding', None) == 'gzip': url_data = gzip.GzipFile(fileobj=StringIO.StringIO(raw_data)).read() else: url_data = raw_data return url_data def _Encode(self, s): if self._input_encoding: return unicode(s, self._input_encoding).encode('utf-8') else: return unicode(s).encode('utf-8') def _EncodeParameters(self, parameters): '''Return a string in key=value&key=value form Values of None are not included in the output string. Args: parameters: A dict of (key, value) tuples, where value is encoded as specified by self._encoding Returns: A URL-encoded string in "key=value&key=value" form ''' if parameters is None: return None else: return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None])) def _EncodePostData(self, post_data): '''Return a string in key=value&key=value form Values are assumed to be encoded in the format specified by self._encoding, and are subsequently URL encoded. 
Args: post_data: A dict of (key, value) tuples, where value is encoded as specified by self._encoding Returns: A URL-encoded string in "key=value&key=value" form ''' if post_data is None: return None else: return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in post_data.items()])) def _ParseAndCheckTwitter(self, json): """Try and parse the JSON returned from Twitter and return an empty dictionary if there is any error. This is a purely defensive check because during some Twitter network outages it will return an HTML failwhale page.""" try: data = simplejson.loads(json) self._CheckForTwitterError(data) except ValueError: if "<title>Twitter / Over capacity</title>" in json: raise TwitterError("Capacity Error") if "<title>Twitter / Error</title>" in json: raise TwitterError("Technical Error") raise TwitterError("json decoding") return data def _CheckForTwitterError(self, data): """Raises a TwitterError if twitter returns an error message. Args: data: A python dict created from the Twitter json response Raises: TwitterError wrapping the twitter error message if one exists. """ # Twitter errors are relatively unlikely, so it is faster # to check first, rather than try and catch the exception if 'error' in data: raise TwitterError(data['error']) if 'errors' in data: raise TwitterError(data['errors']) def _FetchUrl(self, url, post_data=None, parameters=None, no_cache=None, use_gzip_compression=None): '''Fetch a URL, optionally caching for a specified time. Args: url: The URL to retrieve post_data: A dict of (str, unicode) key/value pairs. If set, POST will be used. parameters: A dict whose key/value pairs should encoded and added to the query string. [Optional] no_cache: If true, overrides the cache on the current request use_gzip_compression: If True, tells the server to gzip-compress the response. It does not apply to POST requests. 
Defaults to None, which will get the value to use from the instance variable self._use_gzip [Optional] Returns: A string containing the body of the response. ''' # Build the extra parameters dict extra_params = {} if self._default_params: extra_params.update(self._default_params) if parameters: extra_params.update(parameters) if post_data: http_method = "POST" else: http_method = "GET" if self._debugHTTP: _debug = 1 else: _debug = 0 http_handler = self._urllib.HTTPHandler(debuglevel=_debug) https_handler = self._urllib.HTTPSHandler(debuglevel=_debug) http_proxy = os.environ.get('http_proxy') https_proxy = os.environ.get('https_proxy') if http_proxy is None or https_proxy is None : proxy_status = False else : proxy_status = True opener = self._urllib.OpenerDirector() opener.add_handler(http_handler) opener.add_handler(https_handler) if proxy_status is True : proxy_handler = self._urllib.ProxyHandler({'http':str(http_proxy),'https': str(https_proxy)}) opener.add_handler(proxy_handler) if use_gzip_compression is None: use_gzip = self._use_gzip else: use_gzip = use_gzip_compression # Set up compression if use_gzip and not post_data: opener.addheaders.append(('Accept-Encoding', 'gzip')) if self._oauth_consumer is not None: if post_data and http_method == "POST": parameters = post_data.copy() req = oauth.Request.from_consumer_and_token(self._oauth_consumer, token=self._oauth_token, http_method=http_method, http_url=url, parameters=parameters) req.sign_request(self._signature_method_hmac_sha1, self._oauth_consumer, self._oauth_token) headers = req.to_header() if http_method == "POST": encoded_post_data = req.to_postdata() else: encoded_post_data = None url = req.to_url() else: url = self._BuildUrl(url, extra_params=extra_params) encoded_post_data = self._EncodePostData(post_data) # Open and return the URL immediately if we're not going to cache if encoded_post_data or no_cache or not self._cache or not self._cache_timeout: response = opener.open(url, encoded_post_data) 
url_data = self._DecompressGzippedResponse(response) opener.close() else: # Unique keys are a combination of the url and the oAuth Consumer Key if self._consumer_key: key = self._consumer_key + ':' + url else: key = url # See if it has been cached before last_cached = self._cache.GetCachedTime(key) # If the cached version is outdated then fetch another and store it if not last_cached or time.time() >= last_cached + self._cache_timeout: try: response = opener.open(url, encoded_post_data) url_data = self._DecompressGzippedResponse(response) self._cache.Set(key, url_data) except urllib2.HTTPError, e: print e opener.close() else: url_data = self._cache.Get(key) # Always return the latest version return url_data class _FileCacheError(Exception): '''Base exception class for FileCache related errors''' class _FileCache(object): DEPTH = 3 def __init__(self,root_directory=None): self._InitializeRootDirectory(root_directory) def Get(self,key): path = self._GetPath(key) if os.path.exists(path): return open(path).read() else: return None def Set(self,key,data): path = self._GetPath(key) directory = os.path.dirname(path) if not os.path.exists(directory): os.makedirs(directory) if not os.path.isdir(directory): raise _FileCacheError('%s exists but is not a directory' % directory) temp_fd, temp_path = tempfile.mkstemp() temp_fp = os.fdopen(temp_fd, 'w') temp_fp.write(data) temp_fp.close() if not path.startswith(self._root_directory): raise _FileCacheError('%s does not appear to live under %s' % (path, self._root_directory)) if os.path.exists(path): os.remove(path) os.rename(temp_path, path) def Remove(self,key): path = self._GetPath(key) if not path.startswith(self._root_directory): raise _FileCacheError('%s does not appear to live under %s' % (path, self._root_directory )) if os.path.exists(path): os.remove(path) def GetCachedTime(self,key): path = self._GetPath(key) if os.path.exists(path): return os.path.getmtime(path) else: return None def _GetUsername(self): '''Attempt to 
find the username in a cross-platform fashion.''' try: return os.getenv('USER') or \ os.getenv('LOGNAME') or \ os.getenv('USERNAME') or \ os.getlogin() or \ 'nobody' except (AttributeError, IOError, OSError), e: return 'nobody' def _GetTmpCachePath(self): username = self._GetUsername() cache_directory = 'python.cache_' + username return os.path.join(tempfile.gettempdir(), cache_directory) def _InitializeRootDirectory(self, root_directory): if not root_directory: root_directory = self._GetTmpCachePath() root_directory = os.path.abspath(root_directory) if not os.path.exists(root_directory): os.mkdir(root_directory) if not os.path.isdir(root_directory): raise _FileCacheError('%s exists but is not a directory' % root_directory) self._root_directory = root_directory def _GetPath(self,key): try: hashed_key = md5(key).hexdigest() except TypeError: hashed_key = md5.new(key).hexdigest() return os.path.join(self._root_directory, self._GetPrefix(hashed_key), hashed_key) def _GetPrefix(self,hashed_key): return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
150,067
Python
.py
3,903
30.912119
149
0.653064
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,397
compatibility.py
midgetspy_Sick-Beard/lib/hachoir_core/compatibility.py
""" Compatibility constants and functions. This module works on Python 1.5 to 2.5. This module provides: - True and False constants ; - any() and all() function ; - has_yield and has_slice values ; - isinstance() with Python 2.3 behaviour ; - reversed() and sorted() function. True and False constants ======================== Truth constants: True is yes (one) and False is no (zero). >>> int(True), int(False) # int value (1, 0) >>> int(False | True) # and binary operator 1 >>> int(True & False) # or binary operator 0 >>> int(not(True) == False) # not binary operator 1 Warning: on Python smaller than 2.3, True and False are aliases to number 1 and 0. So "print True" will displays 1 and not True. any() function ============== any() returns True if at least one items is True, or False otherwise. >>> any([False, True]) True >>> any([True, True]) True >>> any([False, False]) False all() function ============== all() returns True if all items are True, or False otherwise. This function is just apply binary and operator (&) on all values. >>> all([True, True]) True >>> all([False, True]) False >>> all([False, False]) False has_yield boolean ================= has_yield: boolean which indicatese if the interpreter supports yield keyword. yield keyworkd is available since Python 2.0. has_yield boolean ================= has_slice: boolean which indicates if the interpreter supports slices with step argument or not. slice with step is available since Python 2.3. reversed() and sorted() function ================================ reversed() and sorted() function has been introduced in Python 2.4. It's should returns a generator, but this module it may be a list. 
>>> data = list("cab") >>> list(sorted(data)) ['a', 'b', 'c'] >>> list(reversed("abc")) ['c', 'b', 'a'] """ import copy import operator # --- True and False constants from Python 2.0 --- # --- Warning: for Python < 2.3, they are aliases for 1 and 0 --- try: True = True False = False except NameError: True = 1 False = 0 # --- any() from Python 2.5 --- try: from __builtin__ import any except ImportError: def any(items): for item in items: if item: return True return False # ---all() from Python 2.5 --- try: from __builtin__ import all except ImportError: def all(items): return reduce(operator.__and__, items) # --- test if interpreter supports yield keyword --- try: eval(compile(""" from __future__ import generators def gen(): yield 1 yield 2 if list(gen()) != [1, 2]: raise KeyError("42") """, "<string>", "exec")) except (KeyError, SyntaxError): has_yield = False else: has_yield = True # --- test if interpreter supports slices (with step argument) --- try: has_slice = eval('"abc"[::-1] == "cba"') except (TypeError, SyntaxError): has_slice = False # --- isinstance with isinstance Python 2.3 behaviour (arg 2 is a type) --- try: if isinstance(1, int): from __builtin__ import isinstance except TypeError: print "Redef isinstance" def isinstance20(a, typea): if type(typea) != type(type): raise TypeError("TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types") return type(typea) != typea isinstance = isinstance20 # --- reversed() from Python 2.4 --- try: from __builtin__ import reversed except ImportError: # if hasYield() == "ok": # code = """ #def reversed(data): # for index in xrange(len(data)-1, -1, -1): # yield data[index]; #reversed""" # reversed = eval(compile(code, "<string>", "exec")) if has_slice: def reversed(data): if not isinstance(data, list): data = list(data) return data[::-1] else: def reversed(data): if not isinstance(data, list): data = list(data) reversed_data = [] for index in xrange(len(data)-1, -1, -1): 
reversed_data.append(data[index]) return reversed_data # --- sorted() from Python 2.4 --- try: from __builtin__ import sorted except ImportError: def sorted(data): sorted_data = copy.copy(data) sorted_data.sort() return sorted __all__ = ("True", "False", "any", "all", "has_yield", "has_slice", "isinstance", "reversed", "sorted")
4,441
Python
.py
148
26.351351
113
0.62688
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,398
error.py
midgetspy_Sick-Beard/lib/hachoir_core/error.py
""" Functions to display an error (error, warning or information) message. """ from lib.hachoir_core.log import log from lib.hachoir_core.tools import makePrintable import sys, traceback def getBacktrace(empty="Empty backtrace."): """ Try to get backtrace as string. Returns "Error while trying to get backtrace" on failure. """ try: info = sys.exc_info() trace = traceback.format_exception(*info) sys.exc_clear() if trace[0] != "None\n": return "".join(trace) except: # No i18n here (imagine if i18n function calls error...) return "Error while trying to get backtrace" return empty class HachoirError(Exception): """ Parent of all errors in Hachoir library """ def __init__(self, message): message_bytes = makePrintable(message, "ASCII") Exception.__init__(self, message_bytes) self.text = message def __unicode__(self): return self.text # Error classes which may be raised by Hachoir core # FIXME: Add EnvironmentError (IOError or OSError) and AssertionError? # FIXME: Remove ArithmeticError and RuntimeError? HACHOIR_ERRORS = (HachoirError, LookupError, NameError, AttributeError, TypeError, ValueError, ArithmeticError, RuntimeError) info = log.info warning = log.warning error = log.error
1,350
Python
.py
39
29.769231
71
0.694253
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)
1,399
bits.py
midgetspy_Sick-Beard/lib/hachoir_core/bits.py
""" Utilities to convert integers and binary strings to binary (number), binary string, number, hexadecimal, etc. """ from lib.hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN from lib.hachoir_core.compatibility import reversed from itertools import chain, repeat from struct import calcsize, unpack, error as struct_error def swap16(value): """ Swap byte between big and little endian of a 16 bits integer. >>> "%x" % swap16(0x1234) '3412' """ return (value & 0xFF) << 8 | (value >> 8) def swap32(value): """ Swap byte between big and little endian of a 32 bits integer. >>> "%x" % swap32(0x12345678) '78563412' """ value = long(value) return ((value & 0x000000FFL) << 24) \ | ((value & 0x0000FF00L) << 8) \ | ((value & 0x00FF0000L) >> 8) \ | ((value & 0xFF000000L) >> 24) def bin2long(text, endian): """ Convert binary number written in a string into an integer. Skip characters differents than "0" and "1". >>> bin2long("110", BIG_ENDIAN) 6 >>> bin2long("110", LITTLE_ENDIAN) 3 >>> bin2long("11 00", LITTLE_ENDIAN) 3 """ assert endian in (LITTLE_ENDIAN, BIG_ENDIAN) bits = [ (ord(character)-ord("0")) \ for character in text if character in "01" ] assert len(bits) != 0 if endian is not BIG_ENDIAN: bits = reversed(bits) value = 0 for bit in bits: value *= 2 value += bit return value def str2hex(value, prefix="", glue=u"", format="%02X"): r""" Convert binary string in hexadecimal (base 16). >>> str2hex("ABC") u'414243' >>> str2hex("\xF0\xAF", glue=" ") u'F0 AF' >>> str2hex("ABC", prefix="0x") u'0x414243' >>> str2hex("ABC", format=r"\x%02X") u'\\x41\\x42\\x43' """ if isinstance(glue, str): glue = unicode(glue) if 0 < len(prefix): text = [prefix] else: text = [] for character in value: text.append(format % ord(character)) return glue.join(text) def countBits(value): """ Count number of bits needed to store a (positive) integer number. 
>>> countBits(0) 1 >>> countBits(1000) 10 >>> countBits(44100) 16 >>> countBits(18446744073709551615) 64 """ assert 0 <= value count = 1 bits = 1 while (1 << bits) <= value: count += bits value >>= bits bits <<= 1 while 2 <= value: if bits != 1: bits >>= 1 else: bits -= 1 while (1 << bits) <= value: count += bits value >>= bits return count def byte2bin(number, classic_mode=True): """ Convert a byte (integer in 0..255 range) to a binary string. If classic_mode is true (default value), reverse bits. >>> byte2bin(10) '00001010' >>> byte2bin(10, False) '01010000' """ text = "" for i in range(0, 8): if classic_mode: mask = 1 << (7-i) else: mask = 1 << i if (number & mask) == mask: text += "1" else: text += "0" return text def long2raw(value, endian, size=None): r""" Convert a number (positive and not nul) to a raw string. If size is given, add nul bytes to fill to size bytes. >>> long2raw(0x1219, BIG_ENDIAN) '\x12\x19' >>> long2raw(0x1219, BIG_ENDIAN, 4) # 32 bits '\x00\x00\x12\x19' >>> long2raw(0x1219, LITTLE_ENDIAN, 4) # 32 bits '\x19\x12\x00\x00' """ assert (not size and 0 < value) or (0 <= value) assert endian in (LITTLE_ENDIAN, BIG_ENDIAN) text = [] while (value != 0 or text == ""): byte = value % 256 text.append( chr(byte) ) value >>= 8 if size: need = max(size - len(text), 0) else: need = 0 if need: if endian is BIG_ENDIAN: text = chain(repeat("\0", need), reversed(text)) else: text = chain(text, repeat("\0", need)) else: if endian is BIG_ENDIAN: text = reversed(text) return "".join(text) def long2bin(size, value, endian, classic_mode=False): """ Convert a number into bits (in a string): - size: size in bits of the number - value: positive (or nul) number - endian: BIG_ENDIAN (most important bit first) or LITTLE_ENDIAN (least important bit first) - classic_mode (default: False): reverse each packet of 8 bits >>> long2bin(16, 1+4 + (1+8)*256, BIG_ENDIAN) '10100000 10010000' >>> long2bin(16, 1+4 + (1+8)*256, BIG_ENDIAN, True) '00000101 00001001' >>> long2bin(16, 
1+4 + (1+8)*256, LITTLE_ENDIAN) '00001001 00000101' >>> long2bin(16, 1+4 + (1+8)*256, LITTLE_ENDIAN, True) '10010000 10100000' """ text = "" assert endian in (LITTLE_ENDIAN, BIG_ENDIAN) assert 0 <= value for index in xrange(size): if (value & 1) == 1: text += "1" else: text += "0" value >>= 1 if endian is LITTLE_ENDIAN: text = text[::-1] result = "" while len(text) != 0: if len(result) != 0: result += " " if classic_mode: result += text[7::-1] else: result += text[:8] text = text[8:] return result def str2bin(value, classic_mode=True): r""" Convert binary string to binary numbers. If classic_mode is true (default value), reverse bits. >>> str2bin("\x03\xFF") '00000011 11111111' >>> str2bin("\x03\xFF", False) '11000000 11111111' """ text = "" for character in value: if text != "": text += " " byte = ord(character) text += byte2bin(byte, classic_mode) return text def _createStructFormat(): """ Create a dictionnary (endian, size_byte) => struct format used by str2long() to convert raw data to positive integer. """ format = { BIG_ENDIAN: {}, LITTLE_ENDIAN: {}, } for struct_format in "BHILQ": try: size = calcsize(struct_format) format[BIG_ENDIAN][size] = '>%s' % struct_format format[LITTLE_ENDIAN][size] = '<%s' % struct_format except struct_error: pass return format _struct_format = _createStructFormat() def str2long(data, endian): r""" Convert a raw data (type 'str') into a long integer. 
>>> chr(str2long('*', BIG_ENDIAN)) '*' >>> str2long("\x00\x01\x02\x03", BIG_ENDIAN) == 0x10203 True >>> str2long("\x2a\x10", LITTLE_ENDIAN) == 0x102a True >>> str2long("\xff\x14\x2a\x10", BIG_ENDIAN) == 0xff142a10 True >>> str2long("\x00\x01\x02\x03", LITTLE_ENDIAN) == 0x3020100 True >>> str2long("\xff\x14\x2a\x10\xab\x00\xd9\x0e", BIG_ENDIAN) == 0xff142a10ab00d90e True >>> str2long("\xff\xff\xff\xff\xff\xff\xff\xff", BIG_ENDIAN) == (2**64-1) True """ assert 1 <= len(data) <= 32 # arbitrary limit: 256 bits try: return unpack(_struct_format[endian][len(data)], data)[0] except KeyError: pass assert endian in (BIG_ENDIAN, LITTLE_ENDIAN) shift = 0 value = 0 if endian is BIG_ENDIAN: data = reversed(data) for character in data: byte = ord(character) value += (byte << shift) shift += 8 return value
7,401
Python
.py
253
23
86
0.57215
midgetspy/Sick-Beard
2,890
1,507
113
GPL-3.0
9/5/2024, 5:08:58 PM (Europe/Amsterdam)