index | package | name | docstring | code | signature |
---|---|---|---|---|---|
7,178 | wikitools3.page | move | Move the page
Params are the same as the API:
mvto - page title to move to, the only required param
reason - summary for the log
movetalk - move the corresponding talk page
noredirect - don't create a redirect at the previous title
watch - add the page to your watchlist
unwatch - remove the page from your watchlist
| def move(
self,
mvto,
reason=False,
movetalk=False,
noredirect=False,
watch=False,
unwatch=False,
):
"""Move the page
Params are the same as the API:
mvto - page title to move to, the only required param
reason - summary for the log
movetalk - move the corresponding talk page
noredirect - don't create a redirect at the previous title
watch - add the page to your watchlist
unwatch - remove the page from your watchlist
"""
if not self.title and self.pageid == 0:
self.setPageInfo()
if not self.exists:
raise NoPage
token = self.site.getToken("csrf")
params = {
"action": "move",
"to": mvto,
"token": token,
}
if self.pageid:
params["fromid"] = self.pageid
else:
params["from"] = self.title
if reason:
params["reason"] = reason.encode("utf-8")
if movetalk:
params["movetalk"] = "1"
if noredirect:
params["noredirect"] = "1"
if watch:
params["watch"] = "1"
if unwatch:
params["unwatch"] = "1"
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "move" in result:
self.title = result["move"]["to"]
self.namespace = namespaceDetect(self.title, self.site)
if self.namespace != 0:
self.unprefixedtitle = self.title.split(":", 1)[1]
else:
self.unprefixedtitle = self.title
if not isinstance(self.title, str):
self.title = str(self.title, "utf-8")
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
else:
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
return result
| (self, mvto, reason=False, movetalk=False, noredirect=False, watch=False, unwatch=False) |
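A minimal usage sketch for Page.move (not taken from the package docs; the endpoint URL and credentials are placeholders, and the top-level Wiki/login calls are assumptions about the wikitools3 API):

```python
from wikitools3 import wiki, page

site = wiki.Wiki("https://example.org/w/api.php")  # hypothetical endpoint
site.login("BotUser", "botpassword")               # placeholder credentials

p = page.Page(site, "Old title")
# Move, carrying the talk page along and suppressing the redirect
result = p.move("New title", reason="Renaming per discussion",
                movetalk=True, noredirect=True)
```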
7,179 | wikitools3.page | protect | Protect a page
Restrictions and expirations are dictionaries of
protection level/expiry settings, e.g., {'edit':'sysop'} and
{'move':'3 days'}. expirations can also be a string to set
all levels to the same expiration
reason - summary for log
cascade - apply protection to all pages transcluded on the page
| def protect(self, restrictions={}, expirations={}, reason=False, cascade=False):
"""Protect a page
Restrictions and expirations are dictionaries of
protection level/expiry settings, e.g., {'edit':'sysop'} and
{'move':'3 days'}. expirations can also be a string to set
all levels to the same expiration
reason - summary for log
cascade - apply protection to all pages transcluded on the page
"""
if not self.title:
self.setPageInfo()
if not restrictions:
raise ProtectError("No protection levels given")
if len(expirations) > len(restrictions):
raise ProtectError("More expirations than restrictions given")
token = self.site.getToken("csrf")
protections = ""
expiry = ""
if isinstance(expirations, str):
expiry = expirations
for type in restrictions:
if protections:
protections += "|"
protections += type + "=" + restrictions[type]
if isinstance(expirations, dict) and type in expirations:
if expiry:
expiry += "|"
expiry += expirations[type]
elif isinstance(expirations, dict):
if expiry:
expiry += "|"
expiry += "indefinite"
params = {
"action": "protect",
"title": self.title,
"token": token,
"protections": protections,
}
if expiry:
params["expiry"] = expiry
if reason:
params["reason"] = reason
if cascade:
params["cascade"] = ""
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "protect" in result:
self.protection = {}
return result
| (self, restrictions={}, expirations={}, reason=False, cascade=False) |
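A sketch of protect's dict arguments, reusing the `site` object from the move sketch above. Restriction types without a matching expiration default to "indefinite"; the dicts are flattened into the pipe-separated `protections` and `expiry` API parameters:

```python
p = page.Page(site, "Main Page")
# Becomes protections="edit=sysop|move=sysop", expiry="indefinite|3 days"
p.protect(
    restrictions={"edit": "sysop", "move": "sysop"},
    expirations={"move": "3 days"},
    reason="High-traffic page",
)
```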
7,180 | wikitools3.page | setNamespace | Change the namespace number of a page object
Updates the title with the new prefix
newns - integer namespace number
recheck - redo pageinfo checks
| def setNamespace(self, newns, recheck=False):
"""Change the namespace number of a page object
Updates the title with the new prefix
newns - integer namespace number
recheck - redo pageinfo checks
"""
if newns not in self.site.namespaces.keys():
raise BadNamespace
if self.namespace == newns:
return self.namespace
if self.title:
if self.namespace != 0:
bits = self.title.split(":", 1)
nsprefix = bits[0].lower()
for ns in self.site.namespaces:
if nsprefix == self.site.namespaces[ns]["*"].lower():
self.title = bits[1]
break
else:
if self.site.NSaliases:
for ns in self.site.NSaliases:
if nsprefix == ns.lower():
self.title = bits[1]
break
self.namespace = newns
if self.namespace:
self.title = (
self.site.namespaces[self.namespace]["*"] + ":" + self.title
)
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
else:
self.namespace = newns
if recheck:
self.pageid = False
self.setPageInfo()
else:
self.pageid = 0
self.wikitext = ""
self.templates = []
self.links = []
return self.namespace
| (self, newns, recheck=False) |
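A sketch of setNamespace retargeting an existing Page object (assumes the wiki's default English namespace names):

```python
p = page.Page(site, "Example", check=False)
p.setNamespace(1)                # title becomes "Talk:Example"; caches cleared
p.setNamespace(0, recheck=True)  # back to mainspace, pageinfo re-queried
```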
7,181 | wikitools3.page | setPageInfo | Sets basic page info, required for almost everything | def setPageInfo(self):
"""Sets basic page info, required for almost everything"""
followRedir = self.followRedir
params = {"action": "query"}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
if followRedir:
params["redirects"] = ""
req = api.APIRequest(self.site, params)
response = req.query(False)
self.pageid = int(list(response["query"]["pages"].keys())[0])
if self.pageid > 0:
self.exists = True
if "missing" in response["query"]["pages"][str(self.pageid)]:
if not self.title:
# Pageids are never recycled, so a bad pageid with no title will never work
raise wiki.WikiError("Bad pageid given with no title")
self.exists = False
if "invalid" in response["query"]["pages"][str(self.pageid)]:
raise BadTitle(self.title)
if "title" in response["query"]["pages"][str(self.pageid)]:
self.title = response["query"]["pages"][str(self.pageid)]["title"]
self.namespace = int(response["query"]["pages"][str(self.pageid)]["ns"])
if self.namespace != 0:
self.unprefixedtitle = self.title.split(":", 1)[1]
else:
self.unprefixedtitle = self.title
self.pageid = int(self.pageid)
if self.pageid < 0:
self.pageid = 0
return self
| (self) |
7,182 | wikitools3.page | setSection | Set a section for the page
section - the section name
number - the section number
| def setSection(self, section=None, number=None):
"""Set a section for the page
section - the section name
number - the section number
"""
if section is None and number is None:
self.section = False
elif number is not None:
try:
self.section = str(int(number))
except ValueError:
raise wiki.WikiError("Section number must be an int")
else:
self.section = self.__getSection(section)
self.wikitext = ""
return self.section
| (self, section=None, number=None) |
7,183 | wikitools3.page | toggleTalk | Switch to and from the talk namespaces
Returns a new page object that's either the talk or non-talk
version of the current page
check and followRedir - same meaning as Page constructor
| def toggleTalk(self, check=True, followRedir=True):
"""Switch to and from the talk namespaces
Returns a new page object that's either the talk or non-talk
version of the current page
check and followRedir - same meaning as Page constructor
"""
if not self.title:
self.setPageInfo()
ns = self.namespace
if ns < 0:
return False
nsname = self.site.namespaces[ns]["*"]
if self.isTalk():
newns = self.site.namespaces[ns - 1]["*"]
else:
newns = self.site.namespaces[ns + 1]["*"]
try:
pagename = self.title.split(nsname + ":", 1)[1]
except IndexError:
pagename = self.title
if newns != "":
newname = newns + ":" + pagename
else:
newname = pagename
return Page(self.site, newname, check, followRedir)
| (self, check=True, followRedir=True) |
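A sketch of toggleTalk; it relies on MediaWiki's convention that a subject namespace and its talk namespace are adjacent even/odd numbers (0/1, 2/3, ...), which is what isTalk() checks:

```python
talk = page.Page(site, "Example").toggleTalk()  # Page for "Talk:Example"
back = talk.toggleTalk()                        # Page for "Example"
```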
7,184 | wikitools3.wiki | CookiesExpired | Cookies are expired, needs to be an exception so login() will use the API instead | class CookiesExpired(WikiError):
"""Cookies are expired, needs to be an exception so login() will use the API instead"""
| null |
7,185 | wikitools3.page | EditError | Problem with edit request | class EditError(wiki.WikiError):
"""Problem with edit request"""
| null |
7,186 | wikitools3.wikifile | File | A file on the wiki | class File(page.Page):
"""A file on the wiki"""
def __init__(
self,
wiki,
title,
check=True,
followRedir=False,
section=False,
sectionnumber=False,
pageid=False,
):
"""
wiki - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
"""
page.Page.__init__(
self, wiki, title, check, followRedir, section, sectionnumber, pageid
)
if self.namespace != 6:
self.setNamespace(6, check)
self.usage = []
self.filehistory = []
def getHistory(self, force=False):
warnings.warn(
"""File.getHistory has been renamed to File.getFileHistory""", FutureWarning
)
return self.getFileHistory(force)
def getFileHistory(self, force=False):
if self.filehistory and not force:
return self.filehistory
if self.pageid == 0 and not self.title:
self.setPageInfo()
params = {
"action": "query",
"prop": "imageinfo",
"iilimit": self.site.limit,
}
if self.pageid > 0:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.filehistory = []
for data in req.queryGen():
pid = list(data["query"]["pages"].keys())[0]
for item in data["query"]["pages"][pid]["imageinfo"]:
self.filehistory.append(item)
return self.filehistory
def getUsage(self, titleonly=False, force=False, namespaces=False):
"""Gets a list of pages that use the file
titleonly - set to True to only create a list of strings,
else it will be a list of Page objects
force - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.usage and not force:
if titleonly:
if namespaces is not False:
return [p.title for p in self.usage if p.namespace in namespaces]
else:
return [p.title for p in self.usage]
if namespaces is False:
return self.usage
else:
return [p for p in self.usage if p.namespace in namespaces]
else:
ret = []
usage = []
for title in self.__getUsageInternal(namespaces):
usage.append(title)
if titleonly:
ret.append(title.title)
if titleonly:
return ret
if namespaces is False:
self.usage = usage
return usage
def getUsageGen(self, titleonly=False, force=False, namespaces=False):
"""Generator function for pages that use the file
titleonly - set to True to return strings,
else it will return Page objects
force - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.usage and not force:
for title in self.usage:
if namespaces is False or title.namespace in namespaces:
if titleonly:
yield title.title
else:
yield title
else:
if namespaces is False:
self.usage = []
for title in self.__getUsageInternal(namespaces):
if namespaces is False:
self.usage.append(title)
if titleonly:
yield title.title
else:
yield title
def __getUsageInternal(self, namespaces=False):
params = {
"action": "query",
"list": "imageusage",
"iutitle": self.title,
"iulimit": self.site.limit,
}
if namespaces is not False:
params["iunamespace"] = "|".join([str(ns) for ns in namespaces])
while True:
req = api.APIRequest(self.site, params)
data = req.query(False)
for item in data["query"]["imageusage"]:
yield page.Page(
self.site, item["title"], check=False, followRedir=False
)
try:
params["iucontinue"] = data["query-continue"]["imageusage"][
"iucontinue"
]
except:
break
def __extractToList(self, json, stuff):
list = []
if stuff in json["query"]:
for item in json["query"][stuff]:
list.append(item["title"])
return list
def download(self, width=False, height=False, location=False):
"""Download the image to a local file
width/height - set width OR height of the downloaded image
location - set the filename to save to. If not set, the page title
minus the namespace prefix will be used and saved to the current directory
"""
if self.pageid == 0:
self.setPageInfo()
params = {"action": "query", "prop": "imageinfo", "iiprop": "url"}
if width and height:
raise FileDimensionError("Can't specify both width and height")
if width:
params["iiurlwidth"] = width
if height:
params["iiurlheight"] = height
if self.pageid != 0:
params["pageids"] = self.pageid
elif self.title:
params["titles"] = self.title
else:
self.setPageInfo()
if (
not self.exists
): # Non-existent files may be on a shared repo (e.g. commons)
params["titles"] = self.title
else:
params["pageids"] = self.pageid
req = api.APIRequest(self.site, params)
res = req.query(False)
key = list(res["query"]["pages"].keys())[0]
url = res["query"]["pages"][key]["imageinfo"][0]["url"]
if not location:
location = self.title.split(":", 1)[1]
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.site.cookies))
headers = {"User-agent": self.site.useragent}
request = urllib.request.Request(url, None, headers)
data = opener.open(request)
f = open(location, "wb", 0)
f.write(data.read())
f.close()
return location
def upload(
self, fileobj=None, comment="", url=None, ignorewarnings=False, watch=False
):
"""Upload a file, requires the "poster3" module
fileobj - A file object opened for reading
comment - The log comment, used as the initial page content if the file
doesn't already exist on the wiki
url - A URL to upload the file from, if allowed on the wiki
ignorewarnings - Ignore warnings about duplicate files, etc.
watch - Add the page to your watchlist
"""
if not api.canupload and fileobj:
raise UploadError("The poster3 module is required for file uploading")
if not fileobj and not url:
raise UploadError("Must give either a file object or a URL")
if fileobj and url:
raise UploadError("Cannot give a file and a URL")
if fileobj:
if not isinstance(fileobj, io.IOBase):
raise UploadError(
"If uploading from a file, a file object must be passed"
)
if fileobj.mode not in ["r", "rb", "r+"]:
raise UploadError("File must be readable")
fileobj.seek(0)
params = {
"action": "upload",
"comment": comment,
"filename": self.unprefixedtitle,
"token": self.site.getToken("csrf"),
}
if url:
params["url"] = url
else:
params["file"] = fileobj
if ignorewarnings:
params["ignorewarnings"] = ""
if watch:
params["watch"] = ""
req = api.APIRequest(self.site, params, write=True, multipart=bool(fileobj))
res = req.query()
if "upload" in res:
if res["upload"]["result"] == "Success":
self.wikitext = ""
self.links = []
self.templates = []
self.exists = True
elif res["upload"]["result"] == "Warning":
for warning in res["upload"]["warnings"].keys():
if warning == "duplicate":
print(
"File is a duplicate of "
+ res["upload"]["warnings"]["duplicate"][0]
)
elif warning == "page-exists" or warning == "exists":
print(
"Page already exists: " + res["upload"]["warnings"][warning]
)
else:
print(
"Warning: "
+ warning
+ " "
+ res["upload"]["warnings"][warning]
)
return res
| (wiki, title, check=True, followRedir=False, section=False, sectionnumber=False, pageid=False) |
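A construction sketch for File; the class forces namespace 6, so a bare or prefixed title both work. Each file-history item is an imageinfo dict straight from the API:

```python
from wikitools3 import wikifile

f = wikifile.File(site, "Example.png")  # equivalent to "File:Example.png"
for rev in f.getFileHistory():
    print(rev.get("timestamp"), rev.get("user"))
```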
7,187 | wikitools3.wikifile | __extractToList | null | def __extractToList(self, json, stuff):
list = []
if stuff in json["query"]:
for item in json["query"][stuff]:
list.append(item["title"])
return list
| (self, json, stuff) |
7,188 | wikitools3.wikifile | __getUsageInternal | null | def __getUsageInternal(self, namespaces=False):
params = {
"action": "query",
"list": "imageusage",
"iutitle": self.title,
"iulimit": self.site.limit,
}
if namespaces is not False:
params["iunamespace"] = "|".join([str(ns) for ns in namespaces])
while True:
req = api.APIRequest(self.site, params)
data = req.query(False)
for item in data["query"]["imageusage"]:
yield page.Page(
self.site, item["title"], check=False, followRedir=False
)
try:
params["iucontinue"] = data["query-continue"]["imageusage"][
"iucontinue"
]
except:
break
| (self, namespaces=False) |
7,194 | wikitools3.wikifile | __init__ |
wiki - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
| def __init__(
self,
wiki,
title,
check=True,
followRedir=False,
section=False,
sectionnumber=False,
pageid=False,
):
"""
wiki - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
"""
page.Page.__init__(
self, wiki, title, check, followRedir, section, sectionnumber, pageid
)
if self.namespace != 6:
self.setNamespace(6, check)
self.usage = []
self.filehistory = []
| (self, wiki, title, check=True, followRedir=False, section=False, sectionnumber=False, pageid=False) |
7,200 | wikitools3.wikifile | download | Download the image to a local file
width/height - set width OR height of the downloaded image
location - set the filename to save to. If not set, the page title
minus the namespace prefix will be used and saved to the current directory
| def download(self, width=False, height=False, location=False):
"""Download the image to a local file
width/height - set width OR height of the downloaded image
location - set the filename to save to. If not set, the page title
minus the namespace prefix will be used and saved to the current directory
"""
if self.pageid == 0:
self.setPageInfo()
params = {"action": "query", "prop": "imageinfo", "iiprop": "url"}
if width and height:
raise FileDimensionError("Can't specify both width and height")
if width:
params["iiurlwidth"] = width
if height:
params["iiurlheight"] = height
if self.pageid != 0:
params["pageids"] = self.pageid
elif self.title:
params["titles"] = self.title
else:
self.setPageInfo()
if (
not self.exists
): # Non-existent files may be on a shared repo (e.g. commons)
params["titles"] = self.title
else:
params["pageids"] = self.pageid
req = api.APIRequest(self.site, params)
res = req.query(False)
key = list(res["query"]["pages"].keys())[0]
url = res["query"]["pages"][key]["imageinfo"][0]["url"]
if not location:
location = self.title.split(":", 1)[1]
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.site.cookies))
headers = {"User-agent": self.site.useragent}
request = urllib.request.Request(url, None, headers)
data = opener.open(request)
f = open(location, "wb", 0)
f.write(data.read())
f.close()
return location
| (self, width=False, height=False, location=False) |
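A download sketch; width and height are mutually exclusive (FileDimensionError otherwise), and the return value is the local filename:

```python
path = f.download(width=300, location="example_300px.png")  # 300px thumbnail
```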
7,203 | wikitools3.wikifile | getFileHistory | null | def getFileHistory(self, force=False):
if self.filehistory and not force:
return self.filehistory
if self.pageid == 0 and not self.title:
self.setPageInfo()
params = {
"action": "query",
"prop": "imageinfo",
"iilimit": self.site.limit,
}
if self.pageid > 0:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.filehistory = []
for data in req.queryGen():
pid = list(data["query"]["pages"].keys())[0]
for item in data["query"]["pages"][pid]["imageinfo"]:
self.filehistory.append(item)
return self.filehistory
| (self, force=False) |
7,204 | wikitools3.wikifile | getHistory | null | def getHistory(self, force=False):
warnings.warn(
"""File.getHistory has been renamed to File.getFileHistory""", FutureWarning
)
return self.getFileHistory(force)
| (self, force=False) |
7,209 | wikitools3.wikifile | getUsage | Gets a list of pages that use the file
titleonly - set to True to only create a list of strings,
else it will be a list of Page objects
force - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
| def getUsage(self, titleonly=False, force=False, namespaces=False):
"""Gets a list of pages that use the file
titleonly - set to True to only create a list of strings,
else it will be a list of Page objects
force - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.usage and not force:
if titleonly:
if namespaces is not False:
return [p.title for p in self.usage if p.namespace in namespaces]
else:
return [p.title for p in self.usage]
if namespaces is False:
return self.usage
else:
return [p for p in self.usage if p.namespace in namespaces]
else:
ret = []
usage = []
for title in self.__getUsageInternal(namespaces):
usage.append(title)
if titleonly:
ret.append(title.title)
if titleonly:
return ret
if namespaces is False:
self.usage = usage
return usage
| (self, titleonly=False, force=False, namespaces=False) |
7,210 | wikitools3.wikifile | getUsageGen | Generator function for pages that use the file
titleonly - set to True to return strings,
else it will return Page objects
force - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
| def getUsageGen(self, titleonly=False, force=False, namespaces=False):
"""Generator function for pages that use the file
titleonly - set to True to return strings,
else it will return Page objects
force - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.usage and not force:
for title in self.usage:
if namespaces is False or title.namespace in namespaces:
if titleonly:
yield title.title
else:
yield title
else:
if namespaces is False:
self.usage = []
for title in self.__getUsageInternal(namespaces):
if namespaces is False:
self.usage.append(title)
if titleonly:
yield title.title
else:
yield title
| (self, titleonly=False, force=False, namespaces=False) |
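A sketch of streaming file usage with a namespace filter; as the docstring notes, filtered queries bypass the cache:

```python
# Pages embedding the file, restricted to mainspace and its talk namespace
for title in f.getUsageGen(titleonly=True, namespaces=[0, 1]):
    print(title)
```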
7,220 | wikitools3.wikifile | upload | Upload a file, requires the "poster3" module
fileobj - A file object opened for reading
comment - The log comment, used as the initial page content if the file
doesn't already exist on the wiki
url - A URL to upload the file from, if allowed on the wiki
ignorewarnings - Ignore warnings about duplicate files, etc.
watch - Add the page to your watchlist
| def upload(
self, fileobj=None, comment="", url=None, ignorewarnings=False, watch=False
):
"""Upload a file, requires the "poster3" module
fileobj - A file object opened for reading
comment - The log comment, used as the initial page content if the file
doesn't already exist on the wiki
url - A URL to upload the file from, if allowed on the wiki
ignorewarnings - Ignore warnings about duplicate files, etc.
watch - Add the page to your watchlist
"""
if not api.canupload and fileobj:
raise UploadError("The poster3 module is required for file uploading")
if not fileobj and not url:
raise UploadError("Must give either a file object or a URL")
if fileobj and url:
raise UploadError("Cannot give a file and a URL")
if fileobj:
if not isinstance(fileobj, io.IOBase):
raise UploadError(
"If uploading from a file, a file object must be passed"
)
if fileobj.mode not in ["r", "rb", "r+"]:
raise UploadError("File must be readable")
fileobj.seek(0)
params = {
"action": "upload",
"comment": comment,
"filename": self.unprefixedtitle,
"token": self.site.getToken("csrf"),
}
if url:
params["url"] = url
else:
params["file"] = fileobj
if ignorewarnings:
params["ignorewarnings"] = ""
if watch:
params["watch"] = ""
req = api.APIRequest(self.site, params, write=True, multipart=bool(fileobj))
res = req.query()
if "upload" in res:
if res["upload"]["result"] == "Success":
self.wikitext = ""
self.links = []
self.templates = []
self.exists = True
elif res["upload"]["result"] == "Warning":
for warning in res["upload"]["warnings"].keys():
if warning == "duplicate":
print(
"File is a duplicate of "
+ res["upload"]["warnings"]["duplicate"][0]
)
elif warning == "page-exists" or warning == "exists":
print(
"Page already exists: " + res["upload"]["warnings"][warning]
)
else:
print(
"Warning: "
+ warning
+ " "
+ res["upload"]["warnings"][warning]
)
return res
| (self, fileobj=None, comment='', url=None, ignorewarnings=False, watch=False) |
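An upload sketch under the stated poster3 requirement; the filename and comment are placeholders:

```python
with open("example.png", "rb") as fp:
    f.upload(fileobj=fp, comment="Initial upload", ignorewarnings=True)

# URL-based alternative, if the wiki permits uploads by URL:
# f.upload(url="https://example.org/example.png", comment="Initial upload")
```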
7,221 | wikitools3.wikifile | FileDimensionError | Invalid dimensions | class FileDimensionError(wiki.WikiError):
"""Invalid dimensions"""
| null |
7,222 | urllib.request | HTTPPasswordMgrWithDefaultRealm | null | class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
| () |
7,223 | urllib.request | __init__ | null | def __init__(self):
self.passwd = {}
| (self) |
7,224 | urllib.request | add_password | null | def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, str):
uri = [uri]
if realm not in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
self.reduce_uri(u, default_port) for u in uri)
self.passwd[realm][reduced_uri] = (user, passwd)
| (self, realm, uri, user, passwd) |
7,225 | urllib.request | find_user_password | null | def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
| (self, realm, authuri) |
7,226 | urllib.request | is_suburi | Check if test is below base in a URI tree
Both args must be URIs in reduced form.
| def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
prefix = base[1]
if prefix[-1:] != '/':
prefix += '/'
return test[1].startswith(prefix)
| (self, base, test) |
7,227 | urllib.request | reduce_uri | Accept authority or URI and extract only the authority and path. | def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = _splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
| (self, uri, default_port=True) |
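A short illustration of how this stdlib password manager normalizes URIs before matching; reduce_uri keeps only authority and path, filling in the scheme's default port:

```python
from urllib.request import HTTPPasswordMgrWithDefaultRealm

mgr = HTTPPasswordMgrWithDefaultRealm()
print(mgr.reduce_uri("http://example.org/wiki"))  # ('example.org:80', '/wiki')
print(mgr.reduce_uri("example.org"))              # ('example.org', '/')
print(mgr.is_suburi(("example.org:80", "/wiki"),
                    ("example.org:80", "/wiki/api.php")))  # True
```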
7,228 | wikitools3.wiki | Namespace |
Class for namespace 'constants'
Names are based on canonical (non-localized) names
This functions as an integer in every way, except that the OR operator ( | )
is overridden to produce a string namespace list for use in API queries
wikiobj.NS_MAIN|wikiobj.NS_USER|wikiobj.NS_PROJECT returns '0|2|4'
| class Namespace(int):
"""
Class for namespace 'constants'
Names are based on canonical (non-localized) names
This functions as an integer in every way, except that the OR operator ( | )
is overridden to produce a string namespace list for use in API queries
wikiobj.NS_MAIN|wikiobj.NS_USER|wikiobj.NS_PROJECT returns '0|2|4'
"""
def __or__(self, other):
return "|".join([str(self), str(other)])
def __ror__(self, other):
return "|".join([str(other), str(self)])
| null |
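A self-contained sketch of the overridden | operator; it builds an API-ready namespace list rather than performing a bitwise OR, while the values otherwise behave as plain ints:

```python
NS_MAIN, NS_USER, NS_PROJECT = Namespace(0), Namespace(2), Namespace(4)
print(NS_MAIN | NS_USER | NS_PROJECT)  # '0|2|4'
print(NS_USER + 1)                     # still integer arithmetic: 3
```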
7,229 | wikitools3.wiki | __or__ | null | def __or__(self, other):
return "|".join([str(self), str(other)])
| (self, other) |
7,230 | wikitools3.wiki | __ror__ | null | def __ror__(self, other):
return "|".join([str(other), str(self)])
| (self, other) |
7,231 | wikitools3.page | NoPage | Non-existent page | class NoPage(wiki.WikiError):
"""Non-existent page"""
| null |
7,232 | wikitools3.page | Page | A page on the wiki | class Page(object):
"""A page on the wiki"""
def __init__(
self,
site,
title=False,
check=True,
followRedir=True,
section=False,
sectionnumber=None,
pageid=False,
namespace=False,
):
"""
site - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
namespace - use to set the namespace prefix *if it's not already in the title*
"""
# Initialize instance vars from function args
if not title and not pageid:
raise wiki.WikiError("No title or pageid given")
self.site = site
if pageid:
self.pageid = int(pageid)
else:
self.pageid = 0
self.followRedir = followRedir
self.title = title
self.unprefixedtitle = False # will be set later
self.urltitle = ""
self.wikitext = ""
self.templates = []
self.links = []
self.categories = []
self.exists = True # If we're not going to check, assume it does
self.protection = {}
self.namespace = namespace
# Things that need to be done before anything else
if self.title:
self.title = self.title.replace("_", " ")
if self.namespace:
if namespace not in self.site.namespaces.keys():
raise BadNamespace(namespace)
if self.title:
self.unprefixedtitle = self.title
self.title = ":".join(
(self.site.namespaces[self.namespace]["*"], self.title)
)
if int(self.namespace) == 0 and self.title:
self.namespace = int(self.namespace)
self.unprefixedtitle = self.title
# Setting page info with API, should set:
# pageid, exists, title, unprefixedtitle, namespace
if check:
self.setPageInfo()
else:
if self.namespace is False and self.title:
self.namespace = namespaceDetect(self.title, self.site)
if self.namespace != 0:
nsname = self.site.namespaces[self.namespace]["*"]
self.unprefixedtitle = self.title.split(":", 1)[1]
self.title = ":".join((nsname, self.unprefixedtitle))
else:
self.unprefixedtitle = self.title
if section or sectionnumber is not None:
self.setSection(section, sectionnumber)
else:
self.section = False
if title:
if not isinstance(self.title, str):
self.title = str(self.title, "utf-8")
if not isinstance(self.unprefixedtitle, str):
self.unprefixedtitle = str(self.unprefixedtitle, "utf-8")
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
def setPageInfo(self):
"""Sets basic page info, required for almost everything"""
followRedir = self.followRedir
params = {"action": "query"}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
if followRedir:
params["redirects"] = ""
req = api.APIRequest(self.site, params)
response = req.query(False)
self.pageid = int(list(response["query"]["pages"].keys())[0])
if self.pageid > 0:
self.exists = True
if "missing" in response["query"]["pages"][str(self.pageid)]:
if not self.title:
# Pageids are never recycled, so a bad pageid with no title will never work
raise wiki.WikiError("Bad pageid given with no title")
self.exists = False
if "invalid" in response["query"]["pages"][str(self.pageid)]:
raise BadTitle(self.title)
if "title" in response["query"]["pages"][str(self.pageid)]:
self.title = response["query"]["pages"][str(self.pageid)]["title"]
self.namespace = int(response["query"]["pages"][str(self.pageid)]["ns"])
if self.namespace != 0:
self.unprefixedtitle = self.title.split(":", 1)[1]
else:
self.unprefixedtitle = self.title
self.pageid = int(self.pageid)
if self.pageid < 0:
self.pageid = 0
return self
def setNamespace(self, newns, recheck=False):
"""Change the namespace number of a page object
Updates the title with the new prefix
newns - integer namespace number
recheck - redo pageinfo checks
"""
if newns not in self.site.namespaces.keys():
raise BadNamespace
if self.namespace == newns:
return self.namespace
if self.title:
if self.namespace != 0:
bits = self.title.split(":", 1)
nsprefix = bits[0].lower()
for ns in self.site.namespaces:
if nsprefix == self.site.namespaces[ns]["*"].lower():
self.title = bits[1]
break
else:
if self.site.NSaliases:
for ns in self.site.NSaliases:
if nsprefix == ns.lower():
self.title = bits[1]
break
self.namespace = newns
if self.namespace:
self.title = (
self.site.namespaces[self.namespace]["*"] + ":" + self.title
)
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
else:
self.namespace = newns
if recheck:
self.pageid = False
self.setPageInfo()
else:
self.pageid = 0
self.wikitext = ""
self.templates = []
self.links = []
return self.namespace
def setSection(self, section=None, number=None):
"""Set a section for the page
section - the section name
number - the section number
"""
if section is None and number is None:
self.section = False
elif number is not None:
try:
self.section = str(int(number))
except ValueError:
raise wiki.WikiError("Section number must be an int")
else:
self.section = self.__getSection(section)
self.wikitext = ""
return self.section
def __getSection(self, section):
if not self.title:
self.setPageInfo()
params = {"action": "parse", "page": self.title, "prop": "sections"}
number = False
req = api.APIRequest(self.site, params)
response = req.query()
for item in response["parse"]["sections"]:
if section == item["line"] or section == item["anchor"]:
if item["index"].startswith(
"T"
): # TODO: It would be cool if it set the page title to the template in this case
continue
number = item["index"]
break
return number
def canHaveSubpages(self):
"""Is the page in a namespace that allows subpages?"""
if not self.title:
self.setPageInfo()
return "subpages" in self.site.namespaces[self.namespace]
def isRedir(self):
"""Is the page a redirect?"""
params = {"action": "query", "redirects": ""}
if not self.exists:
raise NoPage
if self.pageid != 0 and self.exists:
params["pageids"] = self.pageid
elif self.title:
params["titles"] = self.title
else:
self.setPageInfo()
if self.pageid != 0 and self.exists:
params["pageids"] = self.pageid
else:
raise NoPage
req = api.APIRequest(self.site, params)
res = req.query(False)
if "redirects" in res["query"]:
return True
else:
return False
def isTalk(self):
"""Is the page a discussion page?"""
if not self.title:
self.setPageInfo()
return self.namespace % 2 == 1 and self.namespace >= 0
def toggleTalk(self, check=True, followRedir=True):
"""Switch to and from the talk namespaces
Returns a new page object that's either the talk or non-talk
version of the current page
check and followRedir - same meaning as Page constructor
"""
if not self.title:
self.setPageInfo()
ns = self.namespace
if ns < 0:
return False
nsname = self.site.namespaces[ns]["*"]
if self.isTalk():
newns = self.site.namespaces[ns - 1]["*"]
else:
newns = self.site.namespaces[ns + 1]["*"]
try:
pagename = self.title.split(nsname + ":", 1)[1]
except IndexError:
pagename = self.title
if newns != "":
newname = newns + ":" + pagename
else:
newname = pagename
return Page(self.site, newname, check, followRedir)
def getWikiText(self, expandtemplates=False, force=False):
"""Gets the Wikitext of the page
expandtemplates - expand the templates to wikitext instead of transclusions
force - load the text even if we already loaded it before
"""
if self.wikitext and not force:
return self.wikitext
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "revisions",
"rvprop": "content|timestamp",
"rvlimit": "1",
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
if expandtemplates:
params["rvexpandtemplates"] = "1"
if self.section is not False:
params["rvsection"] = self.section
req = api.APIRequest(self.site, params)
response = req.query(False)
if self.pageid == 0:
self.pageid = int(list(response["query"]["pages"].keys())[0])
if self.pageid == -1:
self.exists = False
raise NoPage
self.wikitext = response["query"]["pages"][str(self.pageid)]["revisions"][0]["*"]
self.lastedittime = response["query"]["pages"][str(self.pageid)]["revisions"][
0
]["timestamp"]
return self.wikitext
def getLinks(self, force=False):
"""Gets a list of all the internal links *on* the page
force - load the list even if we already loaded it before
"""
if self.links and not force:
return self.links
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "links",
"pllimit": self.site.limit,
}
if self.pageid > 0:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.links = []
for data in req.queryGen():
self.links.extend(self.__extractToList(data, "links"))
return self.links
def getProtection(self, force=False):
"""Returns the current protection status of the page"""
if self.protection and not force:
return self.protection
if self.pageid == 0 and not self.title:
self.setPageInfo()
params = {
"action": "query",
"prop": "info",
"inprop": "protection",
}
if not self.exists or self.pageid <= 0:
params["titles"] = self.title
else:
params["pageids"] = self.pageid
req = api.APIRequest(self.site, params)
response = req.query(False)
for pr in list(response["query"]["pages"].values())[0]["protection"]:
if pr["level"]:
if pr["expiry"] == "infinity":
expiry = "infinity"
else:
expiry = datetime.datetime.strptime(
pr["expiry"], "%Y-%m-%dT%H:%M:%SZ"
)
self.protection[pr["type"]] = {"expiry": expiry, "level": pr["level"]}
return self.protection
def getTemplates(self, force=False):
"""Gets all list of all the templates on the page
force - load the list even if we already loaded it before
"""
if self.templates and not force:
return self.templates
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "templates",
"tllimit": self.site.limit,
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.templates = []
for data in req.queryGen():
self.templates.extend(self.__extractToList(data, "templates"))
return self.templates
def getCategories(self, force=False):
"""Gets all list of all the categories on the page
force - load the list even if we already loaded it before
"""
if self.categories and not force:
return self.categories
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "categories",
"cllimit": self.site.limit,
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.categories = []
for data in req.queryGen():
self.categories.extend(self.__extractToList(data, "categories"))
return self.categories
def getHistory(self, direction="older", content=True, limit="all"):
"""Get the history of a page
direction - 2 options: 'older' (default) - start with the current revision and get older ones
'newer' - start with the oldest revision and get newer ones
content - If False, get only metadata (timestamp, edit summary, user, etc)
If True (default), also get the revision text
limit - Only retrieve a certain number of revisions. If 'all' (default), all revisions are returned
The data is returned in essentially the same format as the API, a list of dicts that look like:
{u'*': u"Page content", # Only returned when content=True
u'comment': u'Edit summary',
u'contentformat': u'text/x-wiki', # Only returned when content=True
u'contentmodel': u'wikitext', # Only returned when content=True
u'parentid': 139946, # id of previous revision
u'revid': 139871, # revision id
u'sha1': u'0a5cec3ca3e084e767f00c9a5645c17ac27b2757', # sha1 hash of page content
u'size': 129, # size of page in bytes
u'timestamp': u'2002-08-05T14:11:27Z', # timestamp of edit
u'user': u'Username',
u'userid': 48 # user id
}
Note that unlike other get* functions, the data is not cached
"""
max = limit
if limit == "all":
max = float("inf")
if limit == "all" or limit > self.site.limit:
limit = self.site.limit
history = []
rvc = None
while True:
revs, rvc = self.__getHistoryInternal(direction, content, limit, rvc)
history = history + revs
if len(history) == max or rvc is None:
break
if max - len(history) < self.site.limit:
limit = max - len(history)
return history
def getHistoryGen(self, direction="older", content=True, limit="all"):
"""Generator function for page history
The interface is the same as getHistory, but it will only retrieve 1 revision at a time.
This will be slower and have much higher network overhead, but does not require storing
the entire page history in memory
"""
max = limit
count = 0
rvc = None
while True:
revs, rvc = self.__getHistoryInternal(direction, content, 1, rvc)
yield revs[0]
count += 1
if count == max or rvc is None:
break
def __getHistoryInternal(self, direction, content, limit, rvcontinue):
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
if direction != "newer" and direction != "older":
raise wiki.WikiError("direction must be 'newer' or 'older'")
params = {
"action": "query",
"prop": "revisions",
"rvdir": direction,
"rvprop": "ids|flags|timestamp|user|userid|size|sha1|comment",
"continue": "",
"rvlimit": limit,
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
if content:
params["rvprop"] += "|content"
if rvcontinue:
params["continue"] = rvcontinue["continue"]
params["rvcontinue"] = rvcontinue["rvcontinue"]
req = api.APIRequest(self.site, params)
response = req.query(False)
id = list(response["query"]["pages"].keys())[0]
if not self.pageid:
self.pageid = int(id)
revs = response["query"]["pages"][id]["revisions"]
rvc = None
if "continue" in response:
rvc = response["continue"]
return (revs, rvc)
def __extractToList(self, json, stuff):
list = []
if self.pageid == 0:
self.pageid = int(list(json["query"]["pages"].keys())[0])
if stuff in json["query"]["pages"][str(self.pageid)]:
for item in json["query"]["pages"][str(self.pageid)][stuff]:
list.append(item["title"])
return list
def edit(self, *args, **kwargs):
"""Edit the page
Arguments are a subset of the API's action=edit arguments, valid arguments
are defined in the validargs set
To skip the MD5 check, set "skipmd5" keyword argument to True
http://www.mediawiki.org/wiki/API:Edit_-_Create%26Edit_pages#Parameters
For backwards compatibility:
'newtext' is equivalent to 'text'
'basetime' is equivalent to 'basetimestamp'
"""
validargs = set(
[
"text",
"summary",
"minor",
"notminor",
"bot",
"basetimestamp",
"starttimestamp",
"recreate",
"createonly",
"nocreate",
"watch",
"unwatch",
"watchlist",
"prependtext",
"appendtext",
"section",
"captchaword",
"captchaid",
]
)
# For backwards compatibility
if "newtext" in kwargs:
kwargs["text"] = kwargs["newtext"]
del kwargs["newtext"]
if "basetime" in kwargs:
kwargs["basetimestamp"] = kwargs["basetime"]
del kwargs["basetime"]
if len(args) and "text" not in kwargs:
kwargs["text"] = args[0]
skipmd5 = False
if "skipmd5" in kwargs and kwargs["skipmd5"]:
skipmd5 = True
invalid = set(kwargs.keys()).difference(validargs)
if invalid:
for arg in invalid:
del kwargs[arg]
if not self.title:
self.setPageInfo()
if not "section" in kwargs and self.section is not False:
kwargs["section"] = self.section
if (
not "text" in kwargs
and not "prependtext" in kwargs
and not "appendtext" in kwargs
):
raise EditError("No text specified")
if "prependtext" in kwargs and "section" in kwargs:
raise EditError("Bad param combination")
if "createonly" in kwargs and "nocreate" in kwargs:
raise EditError("Bad param combination")
token = self.site.getToken("csrf")
if "text" in kwargs:
hashtext = kwargs["text"]
elif "prependtext" in kwargs and "appendtext" in kwargs:
hashtext = kwargs["prependtext"] + kwargs["appendtext"]
elif "prependtext" in kwargs:
hashtext = kwargs["prependtext"]
else:
hashtext = kwargs["appendtext"]
params = {
"action": "edit",
"title": self.title,
"token": token,
}
if not skipmd5:
if not isinstance(hashtext, str):
hashtext = str(hashtext)
hashtext = unicodedata.normalize("NFC", hashtext).encode("utf8")
params["md5"] = md5(hashtext).hexdigest()
params.update(kwargs)
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "edit" in result and result["edit"]["result"] == "Success":
self.wikitext = ""
self.links = []
self.templates = []
self.exists = True
return result
def move(
self,
mvto,
reason=False,
movetalk=False,
noredirect=False,
watch=False,
unwatch=False,
):
"""Move the page
Params are the same as the API:
mvto - page title to move to, the only required param
reason - summary for the log
movetalk - move the corresponding talk page
noredirect - don't create a redirect at the previous title
watch - add the page to your watchlist
unwatch - remove the page from your watchlist
"""
if not self.title and self.pageid == 0:
self.setPageInfo()
if not self.exists:
raise NoPage
token = self.site.getToken("csrf")
params = {
"action": "move",
"to": mvto,
"token": token,
}
if self.pageid:
params["fromid"] = self.pageid
else:
params["from"] = self.title
if reason:
params["reason"] = reason.encode("utf-8")
if movetalk:
params["movetalk"] = "1"
if noredirect:
params["noredirect"] = "1"
if watch:
params["watch"] = "1"
if unwatch:
params["unwatch"] = "1"
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "move" in result:
self.title = result["move"]["to"]
self.namespace = namespaceDetect(self.title, self.site)
if self.namespace != 0:
self.unprefixedtitle = self.title.split(":", 1)[1]
else:
self.unprefixedtitle = self.title
if not isinstance(self.title, str):
self.title = str(self.title, "utf-8")
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
else:
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
return result
def protect(self, restrictions={}, expirations={}, reason=False, cascade=False):
"""Protect a page
Restrictions and expirations are dictionaries of
protection level/expiry settings, e.g., {'edit':'sysop'} and
{'move':'3 days'}. expirations can also be a string to set
all levels to the same expiration
reason - summary for log
cascade - apply protection to all pages transcluded on the page
"""
if not self.title:
self.setPageInfo()
if not restrictions:
raise ProtectError("No protection levels given")
if len(expirations) > len(restrictions):
raise ProtectError("More expirations than restrictions given")
token = self.site.getToken("csrf")
protections = ""
expiry = ""
if isinstance(expirations, str):
expiry = expirations
for type in restrictions:
if protections:
protections += "|"
protections += type + "=" + restrictions[type]
if isinstance(expirations, dict) and type in expirations:
if expiry:
expiry += "|"
expiry += expirations[type]
elif isinstance(expirations, dict):
if expiry:
expiry += "|"
expiry += "indefinite"
params = {
"action": "protect",
"title": self.title,
"token": token,
"protections": protections,
}
if expiry:
params["expiry"] = expiry
if reason:
params["reason"] = reason
if cascade:
params["cascade"] = ""
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "protect" in result:
self.protection = {}
return result
def delete(self, reason=False, watch=False, unwatch=False):
"""Delete the page
reason - summary for log
watch - add the page to your watchlist
unwatch - remove the page from your watchlist
"""
if not self.title and self.pageid == 0:
self.setPageInfo()
if not self.exists:
raise NoPage
token = self.site.getToken("csrf")
params = {
"action": "delete",
"token": token,
}
if self.pageid:
params["pageid"] = self.pageid
else:
params["title"] = self.title
if reason:
params["reason"] = reason.encode("utf-8")
if watch:
params["watch"] = "1"
if unwatch:
params["unwatch"] = "1"
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "delete" in result:
self.pageid = 0
self.exists = False
self.wikitext = ""
self.templates = ""
self.links = ""
self.protection = {}
self.section = False
return result
def __hash__(self):
return int(self.pageid) ^ hash(self.site.apibase)
def __str__(self):
if self.title:
title = self.title
else:
title = "pageid: " + self.pageid
return (
self.__class__.__name__
+ " "
+ repr(title)
+ " from "
+ repr(self.site.domain)
)
def __repr__(self):
if self.title:
title = self.title
else:
title = "pageid: " + self.pageid
return (
"<"
+ self.__module__
+ "."
+ self.__class__.__name__
+ " "
+ repr(title)
+ " using "
+ repr(self.site.apibase)
+ ">"
)
def __eq__(self, other):
if not isinstance(other, Page):
return False
if self.title:
if self.title == other.title and self.site == other.site:
return True
else:
if self.pageid == other.pageid and self.site == other.site:
return True
return False
def __ne__(self, other):
if not isinstance(other, Page):
return True
if self.title:
if self.title == other.title and self.site == other.site:
return False
else:
if self.pageid == other.pageid and self.site == other.site:
return False
return True
| (site, title=False, check=True, followRedir=True, section=False, sectionnumber=None, pageid=False, namespace=False) |
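A read-modify-write sketch for Page, plus a metadata-only history walk (content=False skips revision text); it reuses the `site` object from the earlier sketches:

```python
p = page.Page(site, "Sandbox")
text = p.getWikiText()
p.edit(text=text + "\n\nAppended line.", summary="append a line", minor=True)

for rev in p.getHistory(content=False, limit=5):
    print(rev["timestamp"], rev["user"], rev.get("comment", ""))
```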
7,238 | wikitools3.page | __init__ |
site - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
namespace - use to set the namespace prefix *if it's not already in the title*
| def __init__(
self,
site,
title=False,
check=True,
followRedir=True,
section=False,
sectionnumber=None,
pageid=False,
namespace=False,
):
"""
site - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
namespace - use to set the namespace prefix *if it's not already in the title*
"""
# Initialize instance vars from function args
if not title and not pageid:
raise wiki.WikiError("No title or pageid given")
self.site = site
if pageid:
self.pageid = int(pageid)
else:
self.pageid = 0
self.followRedir = followRedir
self.title = title
self.unprefixedtitle = False # will be set later
self.urltitle = ""
self.wikitext = ""
self.templates = []
self.links = []
self.categories = []
self.exists = True # If we're not going to check, assume it does
self.protection = {}
self.namespace = namespace
# Things that need to be done before anything else
if self.title:
self.title = self.title.replace("_", " ")
if self.namespace:
if namespace not in self.site.namespaces.keys():
raise BadNamespace(namespace)
if self.title:
self.unprefixedtitle = self.title
self.title = ":".join(
(self.site.namespaces[self.namespace]["*"], self.title)
)
if int(self.namespace) == 0 and self.title:
self.namespace = int(self.namespace)
self.unprefixedtitle = self.title
# Setting page info with API, should set:
# pageid, exists, title, unprefixedtitle, namespace
if check:
self.setPageInfo()
else:
if self.namespace is False and self.title:
self.namespace = namespaceDetect(self.title, self.site)
if self.namespace != 0:
nsname = self.site.namespaces[self.namespace]["*"]
self.unprefixedtitle = self.title.split(":", 1)[1]
self.title = ":".join((nsname, self.unprefixedtitle))
else:
self.unprefixedtitle = self.title
if section or sectionnumber is not None:
self.setSection(section, sectionnumber)
else:
self.section = False
if title:
if not isinstance(self.title, str):
self.title = str(self.title, "utf-8")
if not isinstance(self.unprefixedtitle, str):
self.unprefixedtitle = str(self.unprefixedtitle, "utf-8")
self.urltitle = (
urllib.parse.quote(self.title.encode("utf-8"))
.replace("%20", "_")
.replace("%2F", "/")
)
| (self, site, title=False, check=True, followRedir=True, section=False, sectionnumber=None, pageid=False, namespace=False) |
7,260 | wikitools3.page | ProtectError | Problem with protection request | class ProtectError(wiki.WikiError):
"""Problem with protection request"""
| null |
7,262 | wikitools3.wikifile | UploadError | Error during uploading | class UploadError(wiki.WikiError):
"""Error during uploading"""
| null |
7,263 | wikitools3.user | User | A user on the wiki | class User:
"""A user on the wiki"""
def __init__(self, site, name, check=True):
"""
site - A wiki object
name - The username, as a string
check - Checks for existence, normalizes name
"""
self.site = site
self.name = name.strip()
if not isinstance(self.name, str):
self.name = str(self.name, "utf8")
self.exists = True # If we're not going to check, assume it does
self.blocked = None # So we can tell the difference between blocked/not blocked/haven't checked
self.editcount = -1
self.groups = []
self.id = 0
if check:
self.setUserInfo()
self.isIP = False
self.IPcheck()
self.page = page.Page(
self.site,
":".join([self.site.namespaces[2]["*"], self.name]),
check=check,
followRedir=False,
)
def IPcheck(self):
try: # IPv4 check
s = socket.inet_aton(self.name.replace(" ", "_"))
if socket.inet_ntoa(s) == self.name:
self.isIP = True
self.exists = False
return
except:
pass
try:
s = socket.inet_pton(socket.AF_INET6, self.name.replace(" ", "_"))
if self.IPnorm(socket.inet_ntop(socket.AF_INET6, s)) == self.IPnorm(
self.name
):
self.isIP = True
self.exists = False
self.name = self.IPnorm(self.name)
return
except:
pass
def IPnorm(self, ip):
"""This is basically a port of MediaWiki's IP::sanitizeIP but assuming no CIDR ranges"""
ip = ip.upper()
# Expand zero abbreviations
abbrevPos = ip.find("::")
if abbrevPos != -1:
addressEnd = len(ip) - 1
# If the '::' is at the beginning...
if abbrevPos == 0:
repeat = "0:"
extra = "0" if ip == "::" else ""
pad = 9
elif abbrevPos == addressEnd - 1:
repeat = ":0"
extra = ""
pad = 9
else:
repeat = ":0"
extra = ":"
pad = 8
ip = ip.replace("::", repeat * (pad - ip.count(":")) + extra)
# Remove leading zeros from each block as needed
ip = re.sub(r"(^|:)0+([0-9A-Fa-f]{1,4})", r"\1\2", ip)
return ip
def setUserInfo(self):
"""Sets basic user info"""
params = {
"action": "query",
"list": "users",
"ususers": self.name,
"usprop": "blockinfo|groups|editcount",
}
req = api.APIRequest(self.site, params)
response = req.query(False)
user = response["query"]["users"][0]
self.name = user["name"]
if "missing" in user or "invalid" in user:
self.exists = False
return
self.id = int(user["userid"])
self.editcount = int(user["editcount"])
if "groups" in user:
self.groups = user["groups"]
if "blockedby" in user:
self.blocked = True
else:
self.blocked = False
return self
def getTalkPage(self, check=True, followRedir=False):
"""Convenience function to get an object for the user's talk page"""
return page.Page(
self.site,
":".join([self.site.namespaces[3]["*"], self.name]),
check=check,
followRedir=False,
)
def isBlocked(self, force=False):
"""Determine if a user is blocked"""
if self.blocked is not None and not force:
return self.blocked
params = {
"action": "query",
"list": "blocks",
"bkusers": self.name,
"bkprop": "id",
}
req = api.APIRequest(self.site, params)
res = req.query(False)
if len(res["query"]["blocks"]) > 0:
self.blocked = True
else:
self.blocked = False
return self.blocked
def block(
self,
reason=False,
expiry=False,
anononly=False,
nocreate=False,
autoblock=False,
noemail=False,
hidename=False,
allowusertalk=False,
reblock=False,
):
"""Block the user
Params are the same as the API
reason - block reason
expiry - block expiration
anononly - block anonymous users only
nocreate - disable account creation
autoblock - block IP addresses used by the user
noemail - block user from sending email through the site
hidename - hide the username from the log (requires hideuser right)
allowusertalk - allow the user to edit their talk page
reblock - overwrite existing block
"""
token = self.site.getToken("csrf")
params = {"action": "block", "user": self.name, "token": token}
if reason:
params["reason"] = reason
if expiry:
params["expiry"] = expiry
if anononly:
params["anononly"] = ""
if nocreate:
params["nocreate"] = ""
if autoblock:
params["autoblock"] = ""
if noemail:
params["noemail"] = ""
if hidename:
params["hidename"] = ""
if allowusertalk:
params["allowusertalk"] = ""
if reblock:
params["reblock"] = ""
req = api.APIRequest(self.site, params, write=False)
res = req.query()
if "block" in res:
self.blocked = True
return res
def unblock(self, reason=False):
"""Unblock the user
reason - reason for the log
"""
token = self.site.getToken("csrf")
params = {"action": "unblock", "user": self.name, "token": token}
if reason:
params["reason"] = reason
req = api.APIRequest(self.site, params, write=False)
res = req.query()
if "unblock" in res:
self.blocked = False
return res
def __hash__(self):
return hash(self.name) ^ hash(self.site.apibase)
def __eq__(self, other):
if not isinstance(other, User):
return False
if self.name == other.name and self.site == other.site:
return True
return False
def __ne__(self, other):
if not isinstance(other, User):
return True
if self.name == other.name and self.site == other.site:
return False
return True
def __str__(self):
return (
self.__class__.__name__
+ " "
+ repr(self.name)
+ " on "
+ repr(self.site.domain)
)
def __repr__(self):
return (
"<"
+ self.__module__
+ "."
+ self.__class__.__name__
+ " "
+ repr(self.name)
+ " on "
+ repr(self.site.apibase)
+ ">"
)
| (site, name, check=True) |
7,264 | wikitools3.user | IPcheck | null | def IPcheck(self):
try: # IPv4 check
s = socket.inet_aton(self.name.replace(" ", "_"))
if socket.inet_ntoa(s) == self.name:
self.isIP = True
self.exists = False
return
except OSError:
    pass
try:
s = socket.inet_pton(socket.AF_INET6, self.name.replace(" ", "_"))
if self.IPnorm(socket.inet_ntop(socket.AF_INET6, s)) == self.IPnorm(
self.name
):
self.isIP = True
self.exists = False
self.name = self.IPnorm(self.name)
return
except OSError:
    pass
| (self) |
7,265 | wikitools3.user | IPnorm | This is basically a port of MediaWiki's IP::sanitizeIP but assuming no CIDR ranges | def IPnorm(self, ip):
"""This is basically a port of MediaWiki's IP::sanitizeIP but assuming no CIDR ranges"""
ip = ip.upper()
# Expand zero abbreviations
abbrevPos = ip.find("::")
if abbrevPos != -1:
addressEnd = len(ip) - 1
# If the '::' is at the beginning...
if abbrevPos == 0:
repeat = "0:"
extra = "0" if ip == "::" else ""
pad = 9
elif abbrevPos == addressEnd - 1:
repeat = ":0"
extra = ""
pad = 9
else:
repeat = ":0"
extra = ":"
pad = 8
ip = ip.replace("::", repeat * (pad - ip.count(":")) + extra)
# Remove leading zeros from each block as needed
# (the original pattern kept PHP-style '/.../' delimiters and unescaped
# backreferences, so it never matched anything)
ip = re.sub(r"(^|:)0+([0-9A-Fa-f]{1,4})", r"\1\2", ip)
return ip
| (self, ip) |
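For illustration, a minimal standalone sketch of the same '::' zero-expansion idea, cross-checked against the standard library's ipaddress module; the sample addresses are made up:

import ipaddress

def expand_zeroes(ip):
    # Mirror of IPnorm's abbreviation handling: replace '::' with enough
    # zero groups to bring the address back up to eight blocks.
    ip = ip.upper()
    if "::" in ip:
        left, right = ip.split("::", 1)
        left_groups = left.split(":") if left else []
        right_groups = right.split(":") if right else []
        missing = 8 - len(left_groups) - len(right_groups)
        ip = ":".join(left_groups + ["0"] * missing + right_groups)
    return ip

for sample in ("::1", "2001:DB8::8:800:200C:417A"):
    canonical = ipaddress.ip_address(sample).exploded.upper()
    print(sample, "->", expand_zeroes(sample), "| exploded:", canonical)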
7,266 | wikitools3.user | __eq__ | null | def __eq__(self, other):
if not isinstance(other, User):
return False
if self.name == other.name and self.site == other.site:
return True
return False
| (self, other) |
7,267 | wikitools3.user | __hash__ | null | def __hash__(self):
return hash(self.name) ^ hash(self.site.apibase)
| (self) |
7,268 | wikitools3.user | __init__ |
wiki - A wiki object
name - The username, as a string
check - Checks for existence, normalizes name
| def __init__(self, site, name, check=True):
"""
wiki - A wiki object
name - The username, as a string
check - Checks for existence, normalizes name
"""
self.site = site
self.name = name.strip()
if not isinstance(self.name, str):
self.name = str(self.name, "utf8")
self.exists = True # If we're not going to check, assume it does
self.blocked = None # So we can tell the difference between blocked/not blocked/haven't checked
self.editcount = -1
self.groups = []
self.id = 0
if check:
self.setUserInfo()
self.isIP = False
self.IPcheck()
self.page = page.Page(
self.site,
":".join([self.site.namespaces[2]["*"], self.name]),
check=check,
followRedir=False,
)
| (self, site, name, check=True) |
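A usage sketch, assuming the wikitools3 package is installed and the endpoint is reachable; the username is illustrative:

from wikitools3 import user, wiki

site = wiki.Wiki("https://en.wikipedia.org/w/api.php")  # placeholder endpoint
u = user.User(site, "Example", check=True)  # check=True normalizes the name via the API
print(u.exists, u.isIP, u.editcount, u.groups)
print(u.page)           # Page object for User:Example
print(u.getTalkPage())  # Page object for User talk:Example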
7,269 | wikitools3.user | __ne__ | null | def __ne__(self, other):
if not isinstance(other, User):
return True
if self.name == other.name and self.site == other.site:
return False
return True
| (self, other) |
7,270 | wikitools3.user | __repr__ | null | def __repr__(self):
return (
"<"
+ self.__module__
+ "."
+ self.__class__.__name__
+ " "
+ repr(self.name)
+ " on "
+ repr(self.site.apibase)
+ ">"
)
| (self) |
7,271 | wikitools3.user | __str__ | null | def __str__(self):
return (
self.__class__.__name__
+ " "
+ repr(self.name)
+ " on "
+ repr(self.site.domain)
)
| (self) |
7,272 | wikitools3.user | block | Block the user
Params are the same as the API
reason - block reason
expiry - block expiration
anononly - block anonymous users only
nocreate - disable account creation
autoblock - block IP addresses used by the user
noemail - block user from sending email through the site
hidename - hide the username from the log (requires hideuser right)
allowusertalk - allow the user to edit their talk page
reblock - overwrite existing block
| def block(
self,
reason=False,
expiry=False,
anononly=False,
nocreate=False,
autoblock=False,
noemail=False,
hidename=False,
allowusertalk=False,
reblock=False,
):
"""Block the user
Params are the same as the API
reason - block reason
expiry - block expiration
anononly - block anonymous users only
nocreate - disable account creation
autoblock - block IP addresses used by the user
noemail - block user from sending email through the site
hidename - hide the username from the log (requires hideuser right)
allowusertalk - allow the user to edit their talk page
reblock - overwrite existing block
"""
token = self.site.getToken("csrf")
params = {"action": "block", "user": self.name, "token": token}
if reason:
params["reason"] = reason
if expiry:
params["expiry"] = expiry
if anononly:
params["anononly"] = ""
if nocreate:
params["nocreate"] = ""
if autoblock:
params["autoblock"] = ""
if noemail:
params["noemail"] = ""
if hidename:
params["hidename"] = ""
if allowusertalk:
params["allowusertalk"] = ""
if reblock:
params["reblock"] = ""
req = api.APIRequest(self.site, params, write=True)  # blocking is a write action
res = req.query()
if "block" in res:
self.blocked = True
return res
| (self, reason=False, expiry=False, anononly=False, nocreate=False, autoblock=False, noemail=False, hidename=False, allowusertalk=False, reblock=False) |
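A sketch of a typical block/unblock round trip; it assumes an account with the block right on the placeholder wiki, and the target name and reasons are made up:

from wikitools3 import user, wiki

site = wiki.Wiki("https://example.org/w/api.php")  # placeholder endpoint
site.login("AdminBot")  # prompts for the password interactively

target = user.User(site, "SpamAccount")  # illustrative target
result = target.block(reason="spam", expiry="1 week", nocreate=True, autoblock=True)
print(result)

if target.isBlocked(force=True):  # re-query rather than trust the cached flag
    target.unblock(reason="appeal accepted")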
7,273 | wikitools3.user | getTalkPage | Convenience function to get an object for the user's talk page | def getTalkPage(self, check=True, followRedir=False):
"""Convenience function to get an object for the user's talk page"""
return page.Page(
self.site,
":".join([self.site.namespaces[3]["*"], self.name]),
check=check,
followRedir=followRedir,
)
| (self, check=True, followRedir=False) |
7,274 | wikitools3.user | isBlocked | Determine if a user is blocked | def isBlocked(self, force=False):
"""Determine if a user is blocked"""
if self.blocked is not None and not force:
return self.blocked
params = {
"action": "query",
"list": "blocks",
"bkusers": self.name,
"bkprop": "id",
}
req = api.APIRequest(self.site, params)
res = req.query(False)
if len(res["query"]["blocks"]) > 0:
self.blocked = True
else:
self.blocked = False
return self.blocked
| (self, force=False) |
7,275 | wikitools3.user | setUserInfo | Sets basic user info | def setUserInfo(self):
"""Sets basic user info"""
params = {
"action": "query",
"list": "users",
"ususers": self.name,
"usprop": "blockinfo|groups|editcount",
}
req = api.APIRequest(self.site, params)
response = req.query(False)
user = response["query"]["users"][0]
self.name = user["name"]
if "missing" in user or "invalid" in user:
self.exists = False
return
self.id = int(user["userid"])
self.editcount = int(user["editcount"])
if "groups" in user:
self.groups = user["groups"]
if "blockedby" in user:
self.blocked = True
else:
self.blocked = False
return self
| (self) |
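The underlying list=users query can also be made directly, which shows the response shape setUserInfo() consumes; this sketch needs network access and the username is illustrative:

import json
import urllib.parse
import urllib.request

params = {
    "action": "query",
    "list": "users",
    "ususers": "Example",  # illustrative username
    "usprop": "blockinfo|groups|editcount",
    "format": "json",
}
url = "https://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(params)
with urllib.request.urlopen(url) as resp:
    data = json.load(resp)
entry = data["query"]["users"][0]
# Missing/invalid users carry a 'missing' or 'invalid' key instead of these fields
print(entry.get("name"), entry.get("editcount"), entry.get("groups"))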
7,276 | wikitools3.user | unblock | Unblock the user
reason - reason for the log
| def unblock(self, reason=False):
"""Unblock the user
reason - reason for the log
"""
token = self.site.getToken("csrf")
params = {"action": "unblock", "user": self.name, "token": token}
if reason:
params["reason"] = reason
req = api.APIRequest(self.site, params, write=True)  # unblocking is a write action
res = req.query()
if "unblock" in res:
self.blocked = False
return res
| (self, reason=False) |
7,277 | wikitools3.wiki | UserBlocked | Trying to edit while blocked | class UserBlocked(WikiError):
"""Trying to edit while blocked"""
| null |
7,278 | wikitools3.wiki | Wiki | A Wiki site | class Wiki:
"""A Wiki site"""
def __init__(
self,
url="https://en.wikipedia.org/w/api.php",
httpuser=None,
httppass=None,
preauth=False,
):
"""
url - A URL to the site's API, defaults to en.wikipedia
httpuser - optional user name for HTTP Auth
httppass - password for HTTP Auth, leave out to enter interactively
preauth - true to send headers for HTTP Auth on the first request
instead of relying on the negotiation for them
"""
self.apibase = url
self.cookies = WikiCookieJar()
self.username = ""
urlbits = urlparse(self.apibase)
self.domain = "://".join([urlbits.scheme, urlbits.netloc])
if httpuser is not None:
if httppass is None:
    from getpass import getpass
    httppass = getpass("HTTP Auth password for " + httpuser + ": ")
if preauth:
    self.httppass = httppass
    self.auth = httpuser
    self.passman = None
else:
    self.passman = HTTPPasswordMgrWithDefaultRealm()
    self.passman.add_password(None, self.domain, httpuser, httppass)
    self.auth = None
else:
self.passman = None
self.auth = None
self.maxlag = 5
self.maxwaittime = 120
self.useragent = "python-wikitools3/%s" % VERSION
self.cookiepath = ""
self.limit = 500
self.siteinfo = {}
self.namespaces = {}
self.NSaliases = {}
self.assertval = None
self.newtoken = False
try:
self.setSiteinfo()
except api.APIError: # probably read-restricted
pass
def setSiteinfo(self):
"""Retrieves basic siteinfo
Called when constructing,
or after login if the first call failed
"""
params = {
"action": "query",
"meta": "siteinfo|tokens",
"siprop": "general|namespaces|namespacealiases",
}
if self.maxlag < 120:
params["maxlag"] = 120
req = api.APIRequest(self, params)
info = req.query(False)
sidata = info["query"]["general"]
for item in sidata:
self.siteinfo[item] = sidata[item]
nsdata = info["query"]["namespaces"]
for ns in nsdata:
nsinfo = nsdata[ns]
self.namespaces[nsinfo["id"]] = nsinfo
if ns != "0":
try:
attr = "NS_%s" % (nsdata[ns]["canonical"].replace(" ", "_").upper())
except KeyError:
attr = "NS_%s" % (nsdata[ns]["*"].replace(" ", "_").upper())
else:
attr = "NS_MAIN"
setattr(self, attr, Namespace(ns))  # attribute names must be str in Python 3
nsaliasdata = info["query"]["namespacealiases"]
if nsaliasdata:
for ns in nsaliasdata:
self.NSaliases[ns["*"]] = ns["id"]
if not "writeapi" in sidata:
warnings.warn(
UserWarning,
"WARNING: Write-API not enabled, you will not be able to edit",
)
version = re.search("\d\.(\d\d)", self.siteinfo["generator"])
if not int(version.group(1)) >= 13: # Will this even work on 13?
warnings.warn(
UserWarning,
"WARNING: Some features may not work on older versions of MediaWiki",
)
if "tokens" in info["query"].keys():
self.newtoken = True
return self
def login(
self,
username,
password=False,
remember=False,
force=False,
verify=True,
domain=None,
):
"""Login to the site
remember - saves cookies to a file - the filename will be:
hash(username - apibase).cookies
the cookies will be saved in the current directory, change cookiepath
to use a different location
force - forces login over the API even if a cookie file exists
and overwrites an existing cookie file if remember is True
verify - Checks cookie validity with isLoggedIn()
domain - domain name, required for some auth systems like LDAP
"""
if not force:
try:
cookiefile = (
self.cookiepath
+ str(hash(username + " - " + self.apibase))
+ ".cookies"
)
self.cookies.load(self, cookiefile, True, True)
self.username = username
if not verify or self.isLoggedIn(self.username):
return True
except:
pass
if not password:
from getpass import getpass
password = getpass("Wiki password for " + username + ": ")
def loginerror(info):
try:
print(info["login"]["result"])
except:
print(info["error"]["code"])
print(info["error"]["info"])
return False
data = {
"action": "login",
"lgname": username,
"lgpassword": password,
}
if domain is not None:
data["lgdomain"] = domain
if self.maxlag < 120:
data["maxlag"] = 120
req = api.APIRequest(self, data)
info = req.query()
if info["login"]["result"] == "Success":
self.username = username
elif info["login"]["result"] == "NeedToken":
req.changeParam("lgtoken", info["login"]["token"])
info = req.query()
if info["login"]["result"] == "Success":
self.username = username
else:
return loginerror(info)
else:
return loginerror(info)
if not self.siteinfo:
self.setSiteinfo()
params = {
"action": "query",
"meta": "userinfo",
"uiprop": "rights",
}
if self.maxlag < 120:
params["maxlag"] = 120
req = api.APIRequest(self, params)
info = req.query(False)
user_rights = info["query"]["userinfo"]["rights"]
if "apihighlimits" in user_rights:
self.limit = 5000
if remember:
cookiefile = (
self.cookiepath
+ str(hash(self.username + " - " + self.apibase))
+ ".cookies"
)
self.cookies.save(self, cookiefile, True, True)
if self.useragent == "python-wikitools3/%s" % VERSION:
self.useragent = "python-wikitools3/%s (User:%s)" % (VERSION, self.username)
return True
def logout(self):
params = {"action": "logout"}
if self.maxlag < 120:
params["maxlag"] = 120
cookiefile = (
self.cookiepath
+ str(hash(self.username + " - " + self.apibase))
+ ".cookies"
)
try:
os.remove(cookiefile)
except:
pass
req = api.APIRequest(self, params, write=True)
# action=logout returns absolutely nothing, which json.loads() treats as False
# causing APIRequest.query() to get stuck in a loop
req.opener.open(req.request)
self.cookies = WikiCookieJar()
self.username = ""
self.maxlag = 5
self.useragent = "python-wikitools3/%s" % VERSION
self.limit = 500
return True
def isLoggedIn(self, username=False):
"""Verify that we are a logged in user
username - specify a username to check against
"""
data = {
"action": "query",
"meta": "userinfo",
}
if self.maxlag < 120:
data["maxlag"] = 120
req = api.APIRequest(self, data)
info = req.query(False)
if info["query"]["userinfo"]["id"] == 0:
return False
elif username and info["query"]["userinfo"]["name"] != username:
return False
else:
return True
def setMaxlag(self, maxlag=5):
"""Set the maximum server lag to allow
If the lag is > the maxlag value, all requests will wait
Setting to a negative number will disable maxlag checks
"""
try:
int(maxlag)
except (TypeError, ValueError):
raise WikiError("maxlag must be an integer")
self.maxlag = int(maxlag)
return self.maxlag
def setUserAgent(self, useragent):
"""Function to set a different user-agent"""
self.useragent = str(useragent)
return self.useragent
def setAssert(self, value):
"""Set an assertion value
This only makes a difference on sites with the AssertEdit extension
on others it will be silently ignored
This is only checked on edits, so only applied to write queries
Set to None (the default) to not use anything
http://www.mediawiki.org/wiki/Extension:Assert_Edit
"""
valid = ["user", "bot", "true", "false", "exists", "test", None]
if value not in valid:
raise WikiError("Invalid assertion")
self.assertval = value
return self.assertval
def getToken(self, type):
"""Get a token
For wikis with MW 1.24 or newer:
type (string) - csrf, deleteglobalaccount, patrol, rollback, setglobalaccountstatus, userrights, watch
For older wiki versions, only csrf (edit, move, etc.) tokens are supported
"""
if self.newtoken:
params = {
"action": "query",
"meta": "tokens",
"type": type,
}
req = api.APIRequest(self, params)
response = req.query(False)
token = response["query"]["tokens"][type + "token"]
else:
if type not in [
"edit",
"delete",
"protect",
"move",
"block",
"unblock",
"email",
"csrf",
]:
raise WikiError("Token type unavailable")
params = {
"action": "query",
"prop": "info",
"intoken": "edit",
"titles": "1",
}
req = api.APIRequest(self, params)
response = req.query(False)
if response.get("data", False):
pid = response["data"]["query"]["pages"].keys()[0]
token = response["query"]["pages"][pid]["edittoken"]
else:
pages = response["query"]["pages"]
token = pages.itervalues().next()["edittoken"]
return token
def __hash__(self):
return hash(self.apibase)
def __eq__(self, other):
if not isinstance(other, Wiki):
return False
if self.apibase == other.apibase:
return True
return False
def __ne__(self, other):
if not isinstance(other, Wiki):
return True
if self.apibase == other.apibase:
return False
return True
def __str__(self):
if self.username:
user = " - using User:" + self.username
else:
user = " - not logged in"
return self.domain + user
def __repr__(self):
if self.username:
user = " User:" + self.username
else:
user = " not logged in"
return (
"<"
+ self.__module__
+ "."
+ self.__class__.__name__
+ " "
+ repr(self.apibase)
+ user
+ ">"
)
| (url='https://en.wikipedia.org/w/api.php', httpuser=None, httppass=None, preauth=False) |
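A construction sketch; the second endpoint and its credentials are placeholders for a wiki behind HTTP basic auth:

from wikitools3 import wiki

# Public wiki, no HTTP Auth: siteinfo is fetched during construction
site = wiki.Wiki("https://en.wikipedia.org/w/api.php")
print(site.domain)                    # scheme + host derived from the API URL
print(site.siteinfo.get("sitename"))  # populated by setSiteinfo()

# Wiki behind HTTP basic auth (placeholder credentials)
private = wiki.Wiki(
    "https://intranet.example.org/w/api.php",
    httpuser="reader",
    httppass="s3cret",
)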
7,279 | wikitools3.wiki | __eq__ | null | def __eq__(self, other):
if not isinstance(other, Wiki):
return False
if self.apibase == other.apibase:
return True
return False
| (self, other) |
7,280 | wikitools3.wiki | __hash__ | null | def __hash__(self):
return hash(self.apibase)
| (self) |
7,281 | wikitools3.wiki | __init__ |
url - A URL to the site's API, defaults to en.wikipedia
httpuser - optional user name for HTTP Auth
httppass - password for HTTP Auth, leave out to enter interactively
preauth - true to send headers for HTTP Auth on the first request
instead of relying on the negotiation for them
| def __init__(
self,
url="https://en.wikipedia.org/w/api.php",
httpuser=None,
httppass=None,
preauth=False,
):
"""
url - A URL to the site's API, defaults to en.wikipedia
httpuser - optional user name for HTTP Auth
httppass - password for HTTP Auth, leave out to enter interactively
preauth - true to send headers for HTTP Auth on the first request
instead of relying on the negotiation for them
"""
self.apibase = url
self.cookies = WikiCookieJar()
self.username = ""
urlbits = urlparse(self.apibase)
self.domain = "://".join([urlbits.scheme, urlbits.netloc])
if httpuser is not None:
if httppass is None:
    from getpass import getpass
    httppass = getpass("HTTP Auth password for " + httpuser + ": ")
if preauth:
    self.httppass = httppass
    self.auth = httpuser
    self.passman = None
else:
    self.passman = HTTPPasswordMgrWithDefaultRealm()
    self.passman.add_password(None, self.domain, httpuser, httppass)
    self.auth = None
else:
self.passman = None
self.auth = None
self.maxlag = 5
self.maxwaittime = 120
self.useragent = "python-wikitools3/%s" % VERSION
self.cookiepath = ""
self.limit = 500
self.siteinfo = {}
self.namespaces = {}
self.NSaliases = {}
self.assertval = None
self.newtoken = False
try:
self.setSiteinfo()
except api.APIError: # probably read-restricted
pass
| (self, url='https://en.wikipedia.org/w/api.php', httpuser=None, httppass=None, preauth=False) |
7,282 | wikitools3.wiki | __ne__ | null | def __ne__(self, other):
if not isinstance(other, Wiki):
return True
if self.apibase == other.apibase:
return False
return True
| (self, other) |
7,283 | wikitools3.wiki | __repr__ | null | def __repr__(self):
if self.username:
user = " User:" + self.username
else:
user = " not logged in"
return (
"<"
+ self.__module__
+ "."
+ self.__class__.__name__
+ " "
+ repr(self.apibase)
+ user
+ ">"
)
| (self) |
7,284 | wikitools3.wiki | __str__ | null | def __str__(self):
if self.username:
user = " - using User:" + self.username
else:
user = " - not logged in"
return self.domain + user
| (self) |
7,285 | wikitools3.wiki | getToken | Get a token
For wikis with MW 1.24 or newer:
type (string) - csrf, deleteglobalaccount, patrol, rollback, setglobalaccountstatus, userrights, watch
For older wiki versions, only csrf (edit, move, etc.) tokens are supported
| def getToken(self, type):
"""Get a token
For wikis with MW 1.24 or newer:
type (string) - csrf, deleteglobalaccount, patrol, rollback, setglobalaccountstatus, userrights, watch
For older wiki versions, only csrf (edit, move, etc.) tokens are supported
"""
if self.newtoken:
params = {
"action": "query",
"meta": "tokens",
"type": type,
}
req = api.APIRequest(self, params)
response = req.query(False)
token = response["query"]["tokens"][type + "token"]
else:
if type not in [
"edit",
"delete",
"protect",
"move",
"block",
"unblock",
"email",
"csrf",
]:
raise WikiError("Token type unavailable")
params = {
"action": "query",
"prop": "info",
"intoken": "edit",
"titles": "1",
}
req = api.APIRequest(self, params)
response = req.query(False)
if response.get("data", False):
pid = response["data"]["query"]["pages"].keys()[0]
token = response["query"]["pages"][pid]["edittoken"]
else:
pages = response["query"]["pages"]
token = pages.itervalues().next()["edittoken"]
return token
| (self, type) |
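On MediaWiki 1.24+ this boils down to a single meta=tokens query, which can be illustrated directly; network access is assumed, and anonymous requests get the placeholder token '+\':

import json
import urllib.parse
import urllib.request

params = {
    "action": "query",
    "meta": "tokens",
    "type": "csrf",
    "format": "json",
}
url = "https://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(params)
with urllib.request.urlopen(url) as resp:
    data = json.load(resp)
print(data["query"]["tokens"]["csrftoken"])  # '+\\' without a logged-in session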
7,286 | wikitools3.wiki | isLoggedIn | Verify that we are a logged in user
username - specify a username to check against
| def isLoggedIn(self, username=False):
"""Verify that we are a logged in user
username - specify a username to check against
"""
data = {
"action": "query",
"meta": "userinfo",
}
if self.maxlag < 120:
data["maxlag"] = 120
req = api.APIRequest(self, data)
info = req.query(False)
if info["query"]["userinfo"]["id"] == 0:
return False
elif username and info["query"]["userinfo"]["name"] != username:
return False
else:
return True
| (self, username=False) |
7,287 | wikitools3.wiki | login | Login to the site
remember - saves cookies to a file - the filename will be:
hash(username - apibase).cookies
the cookies will be saved in the current directory, change cookiepath
to use a different location
force - forces login over the API even if a cookie file exists
and overwrites an existing cookie file if remember is True
verify - Checks cookie validity with isLoggedIn()
domain - domain name, required for some auth systems like LDAP
| def login(
self,
username,
password=False,
remember=False,
force=False,
verify=True,
domain=None,
):
"""Login to the site
remember - saves cookies to a file - the filename will be:
hash(username - apibase).cookies
the cookies will be saved in the current directory, change cookiepath
to use a different location
force - forces login over the API even if a cookie file exists
and overwrites an existing cookie file if remember is True
verify - Checks cookie validity with isLoggedIn()
domain - domain name, required for some auth systems like LDAP
"""
if not force:
try:
cookiefile = (
self.cookiepath
+ str(hash(username + " - " + self.apibase))
+ ".cookies"
)
self.cookies.load(self, cookiefile, True, True)
self.username = username
if not verify or self.isLoggedIn(self.username):
return True
except:
pass
if not password:
from getpass import getpass
password = getpass("Wiki password for " + username + ": ")
def loginerror(info):
try:
print(info["login"]["result"])
except:
print(info["error"]["code"])
print(info["error"]["info"])
return False
data = {
"action": "login",
"lgname": username,
"lgpassword": password,
}
if domain is not None:
data["lgdomain"] = domain
if self.maxlag < 120:
data["maxlag"] = 120
req = api.APIRequest(self, data)
info = req.query()
if info["login"]["result"] == "Success":
self.username = username
elif info["login"]["result"] == "NeedToken":
req.changeParam("lgtoken", info["login"]["token"])
info = req.query()
if info["login"]["result"] == "Success":
self.username = username
else:
return loginerror(info)
else:
return loginerror(info)
if not self.siteinfo:
self.setSiteinfo()
params = {
"action": "query",
"meta": "userinfo",
"uiprop": "rights",
}
if self.maxlag < 120:
params["maxlag"] = 120
req = api.APIRequest(self, params)
info = req.query(False)
user_rights = info["query"]["userinfo"]["rights"]
if "apihighlimits" in user_rights:
self.limit = 5000
if remember:
cookiefile = (
self.cookiepath
+ str(hash(self.username + " - " + self.apibase))
+ ".cookies"
)
self.cookies.save(self, cookiefile, True, True)
if self.useragent == "python-wikitools3/%s" % VERSION:
self.useragent = "python-wikitools3/%s (User:%s)" % (VERSION, self.username)
return True
| (self, username, password=False, remember=False, force=False, verify=True, domain=None) |
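A sketch of the login flow with placeholder credentials; remember=True writes a hashed .cookies file under cookiepath so later runs can skip the API login:

from wikitools3 import wiki

site = wiki.Wiki("https://example.org/w/api.php")  # placeholder endpoint
site.cookiepath = "/tmp/"                          # where the .cookies file is written
ok = site.login("ExampleBot", password="hunter2", remember=True)
if ok:
    # accounts with apihighlimits get the bumped query limit
    print("logged in as", site.username, "with limit", site.limit)
else:
    print("login failed")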
7,288 | wikitools3.wiki | logout | null | def logout(self):
params = {"action": "logout"}
if self.maxlag < 120:
params["maxlag"] = 120
cookiefile = (
self.cookiepath
+ str(hash(self.username + " - " + self.apibase))
+ ".cookies"
)
try:
os.remove(cookiefile)
except:
pass
req = api.APIRequest(self, params, write=True)
# action=logout returns absolutely nothing, which json.loads() treats as False
# causing APIRequest.query() to get stuck in a loop
req.opener.open(req.request)
self.cookies = WikiCookieJar()
self.username = ""
self.maxlag = 5
self.useragent = "python-wikitools3/%s" % VERSION
self.limit = 500
return True
| (self) |
7,289 | wikitools3.wiki | setAssert | Set an assertion value
This only makes a difference on sites with the AssertEdit extension
on others it will be silently ignored
This is only checked on edits, so only applied to write queries
Set to None (the default) to not use anything
http://www.mediawiki.org/wiki/Extension:Assert_Edit
| def setAssert(self, value):
"""Set an assertion value
This only makes a difference on sites with the AssertEdit extension
on others it will be silently ignored
This is only checked on edits, so only applied to write queries
Set to None (the default) to not use anything
http://www.mediawiki.org/wiki/Extension:Assert_Edit
"""
valid = ["user", "bot", "true", "false", "exists", "test", None]
if value not in valid:
raise WikiError("Invalid assertion")
self.assertval = value
return self.assertval
| (self, value) |
7,290 | wikitools3.wiki | setMaxlag | Set the maximum server lag to allow
If the lag is > the maxlag value, all requests will wait
Setting to a negative number will disable maxlag checks
| def setMaxlag(self, maxlag=5):
"""Set the maximum server lag to allow
If the lag is > the maxlag value, all requests will wait
Setting to a negative number will disable maxlag checks
"""
try:
int(maxlag)
except (TypeError, ValueError):
raise WikiError("maxlag must be an integer")
self.maxlag = int(maxlag)
return self.maxlag
| (self, maxlag=5) |
7,291 | wikitools3.wiki | setSiteinfo | Retrieves basic siteinfo
Called when constructing,
or after login if the first call failed
| def setSiteinfo(self):
"""Retrieves basic siteinfo
Called when constructing,
or after login if the first call failed
"""
params = {
"action": "query",
"meta": "siteinfo|tokens",
"siprop": "general|namespaces|namespacealiases",
}
if self.maxlag < 120:
params["maxlag"] = 120
req = api.APIRequest(self, params)
info = req.query(False)
sidata = info["query"]["general"]
for item in sidata:
self.siteinfo[item] = sidata[item]
nsdata = info["query"]["namespaces"]
for ns in nsdata:
nsinfo = nsdata[ns]
self.namespaces[nsinfo["id"]] = nsinfo
if ns != "0":
try:
attr = "NS_%s" % (nsdata[ns]["canonical"].replace(" ", "_").upper())
except KeyError:
attr = "NS_%s" % (nsdata[ns]["*"].replace(" ", "_").upper())
else:
attr = "NS_MAIN"
setattr(self, attr, Namespace(ns))  # attribute names must be str in Python 3
nsaliasdata = info["query"]["namespacealiases"]
if nsaliasdata:
for ns in nsaliasdata:
self.NSaliases[ns["*"]] = ns["id"]
if not "writeapi" in sidata:
warnings.warn(
UserWarning,
"WARNING: Write-API not enabled, you will not be able to edit",
)
version = re.search("\d\.(\d\d)", self.siteinfo["generator"])
if not int(version.group(1)) >= 13: # Will this even work on 13?
warnings.warn(
UserWarning,
"WARNING: Some features may not work on older versions of MediaWiki",
)
if "tokens" in info["query"].keys():
self.newtoken = True
return self
| (self) |
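The same siteinfo query can be issued directly to see what feeds the namespace bookkeeping; network access is assumed and the output lines are illustrative:

import json
import urllib.parse
import urllib.request

params = {
    "action": "query",
    "meta": "siteinfo",
    "siprop": "general|namespaces|namespacealiases",
    "format": "json",
}
url = "https://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(params)
with urllib.request.urlopen(url) as resp:
    info = json.load(resp)["query"]
print(info["general"]["generator"])  # e.g. 'MediaWiki 1.xx.x'
for ns_id, ns in sorted(info["namespaces"].items(), key=lambda kv: int(kv[0]))[:5]:
    print(ns_id, repr(ns.get("canonical", ns["*"])))  # main namespace has no 'canonical'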
7,292 | wikitools3.wiki | setUserAgent | Function to set a different user-agent | def setUserAgent(self, useragent):
"""Function to set a different user-agent"""
self.useragent = str(useragent)
return self.useragent
| (self, useragent) |
7,293 | wikitools3.wiki | WikiCookieJar | null | class WikiCookieJar(http.cookiejar.FileCookieJar):
def save(self, site, filename=None, ignore_discard=False, ignore_expires=False):
    if not filename:
        filename = self.filename
    old_umask = os.umask(0o077)
    f = open(filename, "wb")  # pickle.dumps() returns bytes, so write in binary mode
    for c in self:
        if not ignore_discard and c.discard:
            continue
        if not ignore_expires and c.is_expired():  # is_expired is a method
            continue
        cook = pickle.dumps(c, 2)
        f.write(cook + b"|~|")
    content = (
        str(int(time.time())) + "|~|"
    )  # record the current time so we can test for expiration later
    content += "site.limit = %d;" % (
        site.limit
    )  # This eventually might have more stuff in it
    f.write(content.encode("utf8"))
    f.close()
    os.umask(old_umask)
def load(self, site, filename, ignore_discard, ignore_expires):
    f = open(filename, "rb")  # the cookie file is binary: pickled cookies plus a text tail
    cookies = f.read().split(b"|~|")
    saved = cookies[len(cookies) - 2]
    if (
        int(time.time()) - int(saved) > 1296000
    ):  # 15 days, not sure when the cookies actually expire...
        f.close()
        os.remove(filename)
        raise CookiesExpired
    sitedata = cookies[len(cookies) - 1]
    del cookies[len(cookies) - 2]
    del cookies[len(cookies) - 1]
    for c in cookies:
        cook = pickle.loads(c)
        if not ignore_discard and cook.discard:
            continue
        if not ignore_expires and cook.is_expired():  # is_expired is a method
            continue
        self.set_cookie(cook)
    exec(sitedata.decode("utf8"))
    f.close()
| (filename=None, delayload=False, policy=None) |
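The on-disk format the two methods agree on is: pickled cookies joined by the b'|~|' separator, then a save timestamp, then a small Python snippet that load() exec()s. A standalone round trip of just that framing, with made-up payloads standing in for real Cookie objects (the format assumes the separator never occurs inside a pickle, which the original code also takes on faith):

import pickle
import time

SEP = b"|~|"

# Writing: two stand-in payloads, then the timestamp and site-data tail
payloads = [pickle.dumps({"name": "session"}, 2), pickle.dumps({"name": "token"}, 2)]
tail = (str(int(time.time())) + "|~|" + "site.limit = 500;").encode("utf8")
blob = SEP.join(payloads) + SEP + tail

# Reading: split the frame apart the way load() does
parts = blob.split(SEP)
saved, sitedata = parts[-2], parts[-1]
cookies = [pickle.loads(p) for p in parts[:-2]]
print(int(saved), sitedata.decode("utf8"), cookies)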
7,294 | http.cookiejar | __init__ |
Cookies are NOT loaded from the named file until either the .load() or
.revert() method is called.
| def __init__(self, filename=None, delayload=False, policy=None):
"""
Cookies are NOT loaded from the named file until either the .load() or
.revert() method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None:
filename = os.fspath(filename)
self.filename = filename
self.delayload = bool(delayload)
| (self, filename=None, delayload=False, policy=None) |
7,295 | http.cookiejar | __iter__ | null | def __iter__(self):
return deepvalues(self._cookies)
| (self) |
7,296 | http.cookiejar | __len__ | Return number of contained cookies. | def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
| (self) |
7,297 | http.cookiejar | __repr__ | null | def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
| (self) |
7,298 | http.cookiejar | __str__ | null | def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
| (self) |
7,299 | http.cookiejar | _cookie_attrs | Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
| def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
cookies.sort(key=lambda a: len(a.path), reverse=True)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
| (self, cookies) |
7,300 | http.cookiejar | _cookie_from_cookie_tuple | null | def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
_debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
| (self, tup, request) |
7,301 | http.cookiejar | _cookies_for_domain | null | def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
_debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
_debug(" not returning cookie")
continue
_debug(" it's a match")
cookies.append(cookie)
return cookies
| (self, domain, request) |
7,302 | http.cookiejar | _cookies_for_request | Return a list of cookies to be returned to server. | def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
| (self, request) |
7,303 | http.cookiejar | _cookies_from_attrs_set | null | def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
| (self, attrs_set, request) |
7,304 | http.cookiejar | _normalized_cookie_tuples | Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
| def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age
# is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
| (self, attrs_set) |
7,305 | http.cookiejar | _process_rfc2109_cookies | null | def _process_rfc2109_cookies(self, cookies):
rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
if rfc2109_as_ns is None:
rfc2109_as_ns = not self._policy.rfc2965
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_ns:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
| (self, cookies) |
7,306 | http.cookiejar | add_cookie_header | Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
| def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies()
| (self, request) |
7,307 | http.cookiejar | clear | Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
| def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
| (self, domain=None, path=None, name=None) |
7,308 | http.cookiejar | clear_expired_cookies | Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
| def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
"""
self._cookies_lock.acquire()
try:
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
| (self) |
7,309 | http.cookiejar | clear_session_cookies | Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
| def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
| (self) |
7,310 | http.cookiejar | extract_cookies | Extract cookies from response, where allowable given the request. | def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
| (self, response, request) |
7,311 | wikitools3.wiki | load | null | def load(self, site, filename, ignore_discard, ignore_expires):
f = open(filename, "r")
cookies = f.read().split("|~|")
saved = cookies[len(cookies) - 2]
if (
int(time.time()) - int(saved) > 1296000
): # 15 days, not sure when the cookies actually expire...
f.close()
os.remove(filename)
raise CookiesExpired
sitedata = cookies[len(cookies) - 1]
del cookies[len(cookies) - 2]
del cookies[len(cookies) - 1]
for c in cookies:
cook = pickle.loads(c)
if not ignore_discard and cook.discard:
continue
if not ignore_expires and cook.is_expired:
continue
self.set_cookie(cook)
exec(sitedata)
f.close()
| (self, site, filename, ignore_discard, ignore_expires) |
7,312 | http.cookiejar | make_cookies | Return sequence of Cookie objects extracted from response object. | def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object."""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
ns_hdrs = headers.get_all("Set-Cookie", [])
self._policy._now = self._now = int(time.time())
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except Exception:
_warn_unhandled_exception()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except Exception:
_warn_unhandled_exception()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return key not in lookup
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
| (self, response, request) |
7,313 | http.cookiejar | revert | Clear all cookies and reload cookies from a saved file.
Raises LoadError (or OSError) if reversion is not successful; the
object's state will not be altered if this happens.
| def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or OSError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
self._cookies_lock.acquire()
try:
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except OSError:
self._cookies = old_state
raise
finally:
self._cookies_lock.release()
| (self, filename=None, ignore_discard=False, ignore_expires=False) |
7,314 | wikitools3.wiki | save | null | def save(self, site, filename=None, ignore_discard=False, ignore_expires=False):
if not filename:
    filename = self.filename
old_umask = os.umask(0o077)
f = open(filename, "wb")  # pickle.dumps() returns bytes, so write in binary mode
for c in self:
    if not ignore_discard and c.discard:
        continue
    if not ignore_expires and c.is_expired():  # is_expired is a method
        continue
    cook = pickle.dumps(c, 2)
    f.write(cook + b"|~|")
content = (
    str(int(time.time())) + "|~|"
)  # record the current time so we can test for expiration later
content += "site.limit = %d;" % (
    site.limit
)  # This eventually might have more stuff in it
f.write(content.encode("utf8"))
f.close()
os.umask(old_umask)
| (self, site, filename=None, ignore_discard=False, ignore_expires=False) |
7,315 | http.cookiejar | set_cookie | Set a cookie, without checking whether or not it should be set. | def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release()
| (self, cookie) |
7,316 | http.cookiejar | set_cookie_if_ok | Set a cookie if policy says it's OK to do so. | def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so."""
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
| (self, cookie, request) |
7,317 | http.cookiejar | set_policy | null | def set_policy(self, policy):
self._policy = policy
| (self, policy) |
7,318 | wikitools3.wiki | WikiError | Base class for errors | class WikiError(Exception):
"""Base class for errors"""
| null |
7,328 | poster3.encode | multipart_encode | Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either string parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
If ``cb`` is set, it should be a callback which will get called as blocks
of data are encoded. It will be called with (param, current, total),
indicating the current parameter being encoded, the current amount encoded,
and the total amount to encode.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
parameters, and `headers` is a dictionary with the assoicated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s
| def multipart_encode(params, boundary=None, cb=None):
"""Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either string parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
If ``cb`` is set, it should be a callback which will get called as blocks
of data are encoded. It will be called with (param, current, total),
indicating the current parameter being encoded, the current amount encoded,
and the total amount to encode.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
parameters, and `headers` is a dictionary with the associated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s
"""
if boundary is None:
boundary = gen_boundary()
else:
boundary = urllib.parse.quote_plus(boundary)
headers = get_headers(params, boundary)
params = MultipartParam.from_params(params)
return multipart_yielder(params, boundary, cb), headers
| (params, boundary=None, cb=None) |
7,329 | wikitools3.page | namespaceDetect | Detect the namespace of a given title
title - the page title
site - the wiki object the page is on
| def namespaceDetect(title, site):
"""Detect the namespace of a given title
title - the page title
site - the wiki object the page is on
"""
bits = title.split(":", 1)
if len(bits) == 1 or bits[0] == "":
return 0
else:
nsprefix = bits[0].lower()  # wp:Foo and caTEGory:Foo are normalized by MediaWiki
for ns in site.namespaces:
if nsprefix == site.namespaces[ns]["*"].lower():
return int(ns)
else:
if site.NSaliases:
for ns in site.NSaliases:
if nsprefix == ns.lower():
return int(site.NSaliases[ns])
return 0
| (title, site) |
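Since the function only consults site.namespaces and site.NSaliases, it can be exercised with a stub object; the table below is a made-up subset, and the logic is restated inline to keep the sketch self-contained:

class StubSite:
    namespaces = {
        "0": {"*": ""},
        "2": {"*": "User"},
        "4": {"*": "Project"},
    }
    NSaliases = {"WP": "4"}  # alias -> namespace id, as namespacealiases reports it

def namespace_detect(title, site):
    bits = title.split(":", 1)
    if len(bits) == 1 or bits[0] == "":
        return 0
    nsprefix = bits[0].lower()  # prefixes are case-insensitive
    for ns in site.namespaces:
        if nsprefix == site.namespaces[ns]["*"].lower():
            return int(ns)
    for ns in site.NSaliases:
        if nsprefix == ns.lower():
            return int(site.NSaliases[ns])
    return 0

site = StubSite()
for title in ("Sandbox", "User:Example", "wp:Policy", ":Main Page"):
    print(title, "->", namespace_detect(title, site))  # 0, 2, 4, 0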
7,333 | urllib.parse | quote_plus | Like quote(), but also replace ' ' with '+', as required for quoting
HTML form values. Plus signs in the original string are escaped unless
they are included in safe. It also does not have safe default to '/'.
| def quote_plus(string, safe='', encoding=None, errors=None):
"""Like quote(), but also replace ' ' with '+', as required for quoting
HTML form values. Plus signs in the original string are escaped unless
they are included in safe. It also does not have safe default to '/'.
"""
# Check if ' ' in string, where string may either be a str or bytes. If
# there are no spaces, the regular quote will produce the right answer.
if ((isinstance(string, str) and ' ' not in string) or
(isinstance(string, bytes) and b' ' not in string)):
return quote(string, safe, encoding, errors)
if isinstance(safe, str):
space = ' '
else:
space = b' '
string = quote(string, safe + space, encoding, errors)
return string.replace(' ', '+')
| (string, safe='', encoding=None, errors=None) |
7,335 | wikitools3.api | resultCombine | Experimental-ish result-combiner thing
If the result isn't something from action=query,
this will just explode, but that shouldn't happen hopefully?
| def resultCombine(type, old, new):
"""Experimental-ish result-combiner thing
If the result isn't something from action=query,
this will just explode, but that shouldn't happen hopefully?
"""
ret = old
if type in new["query"]: # Basic list, easy
ret["query"][type].extend(new["query"][type])
else: # Else its some sort of prop=thing and/or a generator query
for key in new["query"]["pages"].keys(): # Go through each page
if not key in old["query"]["pages"]: # if it only exists in the new one
ret["query"]["pages"][key] = new["query"]["pages"][
key
] # add it to the list
else:
if not type in new["query"]["pages"][key]:
continue
elif (
type in new["query"]["pages"][key]
and not type in ret["query"]["pages"][key]
): # if only the new one does, just add it to the return
ret["query"]["pages"][key][type] = new["query"]["pages"][key][type]
continue
else: # Need to check for possible duplicates for some, this is faster than just iterating over new and checking for dups in ret
retset = set(
[
tuple(entry.items())
for entry in ret["query"]["pages"][key][type]
]
)
newset = set(
[
tuple(entry.items())
for entry in new["query"]["pages"][key][type]
]
)
retset.update(newset)
ret["query"]["pages"][key][type] = [dict(entry) for entry in retset]
return ret
| (type, old, new) |
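A minimal demonstration with hand-built dicts shaped like two action=query batches; the data is made up and the wikitools3 package is assumed importable:

from wikitools3.api import resultCombine

# list= results are simply concatenated
old = {"query": {"allpages": [{"title": "A"}, {"title": "B"}]}}
new = {"query": {"allpages": [{"title": "C"}]}}
merged = resultCombine("allpages", old, new)
print([p["title"] for p in merged["query"]["allpages"]])  # ['A', 'B', 'C']

# prop= results are merged per page id, with duplicate entries collapsed
old = {"query": {"pages": {"1": {"title": "A", "links": [{"title": "X"}]}}}}
new = {"query": {"pages": {"1": {"links": [{"title": "Y"}]},
                           "2": {"title": "B", "links": [{"title": "Z"}]}}}}
merged = resultCombine("links", old, new)
print(sorted(merged["query"]["pages"]))  # ['1', '2']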
7,340 | wikitools3.api | urlencode |
Hack of urllib's urlencode function, which can handle
utf-8, but for unknown reasons, chooses not to by
trying to encode everything as ascii
| def urlencode(query, doseq=0):
"""
Hack of urllib's urlencode function, which can handle
utf-8, but for unknown reasons, chooses not to by
trying to encode everything as ascii
"""
if hasattr(query, "items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence or mapping object", tb)
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = quote_plus(str(k))
v = quote_plus(str(v))
l.append(k + "=" + v)
else:
for k, v in query:
k = quote_plus(str(k))
if isinstance(v, str):
v = quote_plus(v)
l.append(k + "=" + v)
elif isinstance(v, bytes):
    # bytes values are assumed to already be utf-8 encoded,
    # so they can be quoted directly
    v = quote_plus(v)
    l.append(k + "=" + v)
else:
try:
# is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v))
l.append(k + "=" + v)
else:
# loop over the sequence
for elt in v:
l.append(k + "=" + quote_plus(str(elt)))
return "&".join(l)
| (query, doseq=0) |
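For context, modern urllib handles UTF-8 natively (the hack targeted the Python 2 lineage), so the stdlib call shows the behavior this function is after; the parameter values are illustrative:

import urllib.parse

params = {"action": "edit", "title": "Café", "summary": "naïve test"}
print(urllib.parse.urlencode(params))
# action=edit&title=Caf%C3%A9&summary=na%C3%AFve+test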
7,347 | requests_auth_aws_sigv4 | AWSSigV4 | null | class AWSSigV4(AuthBase):
def __init__(self, service, **kwargs):
''' Create authentication mechanism
:param service: AWS Service identifier, for example `ec2`. This is required.
:param region: AWS Region, for example `us-east-1`. If not provided, it will be set using
            the environment variable `AWS_DEFAULT_REGION` or using boto3, if available.
:param session: If boto3 is available, will attempt to get credentials using boto3,
unless passed explicitly. If using boto3, the provided session will be used or a new
session will be created.
'''
# Set Service
self.service = service
if USE_BOTO3:
# Setup Session
if 'session' in kwargs:
                if isinstance(kwargs['session'], boto3.Session):
                    session = kwargs['session']
                else:
                    raise ValueError("Session must be boto3.Session, {} is invalid".format(type(kwargs['session'])))
else:
session = boto3.Session()
log.debug("Using boto3 session: %s", session)
# First, get credentials passed explicitly
self.aws_access_key_id = kwargs.get('aws_access_key_id')
self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
self.aws_session_token = kwargs.get('aws_session_token')
# Next, try environment variables or use boto3
if self.aws_access_key_id is None or self.aws_secret_access_key is None:
if USE_BOTO3:
cred = session.get_credentials()
log.debug("Got credential from boto3 session")
self.aws_access_key_id = cred.access_key
self.aws_secret_access_key = cred.secret_key
self.aws_session_token = cred.token
else:
log.debug("Checking environment for credentials")
self.aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
self.aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
self.aws_session_token = os.environ.get('AWS_SESSION_TOKEN') or os.environ.get('AWS_SECURITY_TOKEN')
# Last, fail if still not found
if self.aws_access_key_id is None or self.aws_secret_access_key is None:
raise KeyError("AWS Access Key ID and Secret Access Key are required")
# Get Region passed explicitly
self.region = kwargs.get('region')
# Next, try environment variables or use boto3
if self.region is None:
if USE_BOTO3:
self.region = session.region_name
log.debug("Got region from boto3 session")
else:
log.debug("Checking environment for region")
self.region = os.environ.get('AWS_DEFAULT_REGION')
# Last, fail if not found
if self.region is None:
raise KeyError("Region is required")
def __call__(self, r):
''' Called to add authentication information to request
:param r: `requests.models.PreparedRequest` object to modify
:returns: `requests.models.PreparedRequest`, modified to add authentication
'''
# Create a date for headers and the credential string
t = datetime.datetime.utcnow()
self.amzdate = t.strftime('%Y%m%dT%H%M%SZ')
self.datestamp = t.strftime('%Y%m%d')
log.debug("Starting authentication with amzdate=%s", self.amzdate)
# Parse request to get URL parts
p = urlparse(r.url)
log.debug("Request URL: %s", p)
host = p.hostname
uri = p.path
if len(p.query) > 0:
            qs = dict(map(lambda i: i.split('=', 1), p.query.split('&')))
else:
qs = dict()
# Setup Headers
# r.headers is type `requests.structures.CaseInsensitiveDict`
if 'Host' not in r.headers:
r.headers['Host'] = host
if 'Content-Type' not in r.headers:
r.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
if 'User-Agent' not in r.headers:
r.headers['User-Agent'] = 'python-requests/{} auth-aws-sigv4/{}'.format(
requests_version, __version__)
r.headers['X-AMZ-Date'] = self.amzdate
if self.aws_session_token is not None:
r.headers['x-amz-security-token'] = self.aws_session_token
        ## Task 1: Create Canonical Request
## Ref: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
# Query string values must be URL-encoded (space=%20) and be sorted by name.
canonical_querystring = "&".join(map(lambda p: "=".join(p), sorted(qs.items())))
# Create payload hash (hash of the request body content).
if r.method == 'GET':
            payload_hash = hashlib.sha256(b'').hexdigest()
else:
if r.body:
if isinstance(r.body, bytes):
log.debug("Request Body: <bytes> %s", r.body)
payload_hash = hashlib.sha256(r.body).hexdigest()
else:
log.debug("Request Body: <str> %s", r.body)
payload_hash = hashlib.sha256(r.body.encode('utf-8')).hexdigest()
else:
log.debug("Request Body is empty")
payload_hash = hashlib.sha256(b'').hexdigest()
r.headers['x-amz-content-sha256'] = payload_hash
# Create the canonical headers and signed headers. Header names
# must be trimmed and lowercase, and sorted in code point order from
# low to high. Note that there is a trailing \n.
headers_to_sign = sorted(filter(lambda h: h.startswith('x-amz-') or h == 'host',
map(lambda H: H.lower(), r.headers.keys())))
canonical_headers = ''.join(map(lambda h: ":".join((h, r.headers[h])) + '\n', headers_to_sign))
signed_headers = ';'.join(headers_to_sign)
# Combine elements to create canonical request
canonical_request = '\n'.join([r.method, uri, canonical_querystring,
canonical_headers, signed_headers, payload_hash])
log.debug("Canonical Request: '%s'", canonical_request)
## Task 2: Create string to sign
credential_scope = '/'.join([self.datestamp, self.region, self.service, 'aws4_request'])
string_to_sign = '\n'.join(['AWS4-HMAC-SHA256', self.amzdate,
credential_scope, hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()])
log.debug("String-to-Sign: '%s'", string_to_sign)
## Task 3: Calculate Signature
kDate = sign_msg(('AWS4' + self.aws_secret_access_key).encode('utf-8'), self.datestamp)
kRegion = sign_msg(kDate, self.region)
kService = sign_msg(kRegion, self.service)
kSigning = sign_msg(kService, 'aws4_request')
signature = hmac.new(kSigning, string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
log.debug("Signature: %s", signature)
## Task 4: Add signing information to request
r.headers['Authorization'] = "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}".format(
self.aws_access_key_id, credential_scope, signed_headers, signature)
log.debug("Returning Request: <PreparedRequest method=%s, url=%s, headers=%s, SignedHeaders=%s, Signature=%s",
r.method, r.url, r.headers, signed_headers, signature)
return r
| (service, **kwargs) |
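Typical usage attaches AWSSigV4 as a requests auth handler; the endpoint URL below is illustrative, and credentials are resolved from boto3 or the environment as described in __init__:

import requests
from requests_auth_aws_sigv4 import AWSSigV4

resp = requests.get(
    "https://abc123.execute-api.us-east-1.amazonaws.com/prod/items",
    auth=AWSSigV4("execute-api", region="us-east-1"),
)
print(resp.status_code)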
7,348 | requests_auth_aws_sigv4 | __call__ | Called to add authentication information to request
:param r: `requests.models.PreparedRequest` object to modify
:returns: `requests.models.PreparedRequest`, modified to add authentication
| def __call__(self, r):
''' Called to add authentication information to request
:param r: `requests.models.PreparedRequest` object to modify
:returns: `requests.models.PreparedRequest`, modified to add authentication
'''
# Create a date for headers and the credential string
t = datetime.datetime.utcnow()
self.amzdate = t.strftime('%Y%m%dT%H%M%SZ')
self.datestamp = t.strftime('%Y%m%d')
log.debug("Starting authentication with amzdate=%s", self.amzdate)
# Parse request to get URL parts
p = urlparse(r.url)
log.debug("Request URL: %s", p)
host = p.hostname
uri = p.path
if len(p.query) > 0:
    qs = dict(map(lambda i: i.split('=', 1), p.query.split('&')))
else:
qs = dict()
# Setup Headers
# r.headers is type `requests.structures.CaseInsensitiveDict`
if 'Host' not in r.headers:
r.headers['Host'] = host
if 'Content-Type' not in r.headers:
r.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
if 'User-Agent' not in r.headers:
r.headers['User-Agent'] = 'python-requests/{} auth-aws-sigv4/{}'.format(
requests_version, __version__)
r.headers['X-AMZ-Date'] = self.amzdate
if self.aws_session_token is not None:
r.headers['x-amz-security-token'] = self.aws_session_token
    ## Task 1: Create Canonical Request
## Ref: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
# Query string values must be URL-encoded (space=%20) and be sorted by name.
canonical_querystring = "&".join(map(lambda p: "=".join(p), sorted(qs.items())))
# Create payload hash (hash of the request body content).
if r.method == 'GET':
        payload_hash = hashlib.sha256(b'').hexdigest()
else:
if r.body:
if isinstance(r.body, bytes):
log.debug("Request Body: <bytes> %s", r.body)
payload_hash = hashlib.sha256(r.body).hexdigest()
else:
log.debug("Request Body: <str> %s", r.body)
payload_hash = hashlib.sha256(r.body.encode('utf-8')).hexdigest()
else:
log.debug("Request Body is empty")
payload_hash = hashlib.sha256(b'').hexdigest()
r.headers['x-amz-content-sha256'] = payload_hash
# Create the canonical headers and signed headers. Header names
# must be trimmed and lowercase, and sorted in code point order from
# low to high. Note that there is a trailing \n.
headers_to_sign = sorted(filter(lambda h: h.startswith('x-amz-') or h == 'host',
map(lambda H: H.lower(), r.headers.keys())))
canonical_headers = ''.join(map(lambda h: ":".join((h, r.headers[h])) + '\n', headers_to_sign))
signed_headers = ';'.join(headers_to_sign)
# Combine elements to create canonical request
canonical_request = '\n'.join([r.method, uri, canonical_querystring,
canonical_headers, signed_headers, payload_hash])
log.debug("Canonical Request: '%s'", canonical_request)
## Task 2: Create string to sign
credential_scope = '/'.join([self.datestamp, self.region, self.service, 'aws4_request'])
string_to_sign = '\n'.join(['AWS4-HMAC-SHA256', self.amzdate,
credential_scope, hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()])
log.debug("String-to-Sign: '%s'", string_to_sign)
## Task 3: Calculate Signature
kDate = sign_msg(('AWS4' + self.aws_secret_access_key).encode('utf-8'), self.datestamp)
kRegion = sign_msg(kDate, self.region)
kService = sign_msg(kRegion, self.service)
kSigning = sign_msg(kService, 'aws4_request')
signature = hmac.new(kSigning, string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
log.debug("Signature: %s", signature)
## Task 4: Add signing information to request
r.headers['Authorization'] = "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}".format(
self.aws_access_key_id, credential_scope, signed_headers, signature)
log.debug("Returning Request: <PreparedRequest method=%s, url=%s, headers=%s, SignedHeaders=%s, Signature=%s",
r.method, r.url, r.headers, signed_headers, signature)
return r
| (self, r) |
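For reference, the string-to-sign assembled in Task 2 always has this fixed four-line layout; the values below are placeholders (the final line is the SHA-256 hex digest of the canonical request, shown here as the empty-string hash):

string_to_sign = "\n".join([
    "AWS4-HMAC-SHA256",                     # fixed algorithm tag
    "20240101T000000Z",                     # X-AMZ-Date timestamp
    "20240101/us-east-1/ec2/aws4_request",  # credential scope
    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
])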
7,349 | requests_auth_aws_sigv4 | __init__ | Create authentication mechanism
:param service: AWS Service identifier, for example `ec2`. This is required.
:param region: AWS Region, for example `us-east-1`. If not provided, it will be set using
the environment variable `AWS_DEFAULT_REGION` or using boto3, if available.
:param session: If boto3 is available, will attempt to get credentials using boto3,
unless passed explicitly. If using boto3, the provided session will be used or a new
session will be created.
| def __init__(self, service, **kwargs):
''' Create authentication mechanism
:param service: AWS Service identifier, for example `ec2`. This is required.
:param region: AWS Region, for example `us-east-1`. If not provided, it will be set using
        the environment variable `AWS_DEFAULT_REGION` or using boto3, if available.
:param session: If boto3 is available, will attempt to get credentials using boto3,
unless passed explicitly. If using boto3, the provided session will be used or a new
session will be created.
'''
# Set Service
self.service = service
if USE_BOTO3:
# Setup Session
if 'session' in kwargs:
            if isinstance(kwargs['session'], boto3.Session):
                session = kwargs['session']
            else:
                raise ValueError("Session must be boto3.Session, {} is invalid".format(type(kwargs['session'])))
else:
session = boto3.Session()
log.debug("Using boto3 session: %s", session)
# First, get credentials passed explicitly
self.aws_access_key_id = kwargs.get('aws_access_key_id')
self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
self.aws_session_token = kwargs.get('aws_session_token')
# Next, try environment variables or use boto3
if self.aws_access_key_id is None or self.aws_secret_access_key is None:
if USE_BOTO3:
cred = session.get_credentials()
log.debug("Got credential from boto3 session")
self.aws_access_key_id = cred.access_key
self.aws_secret_access_key = cred.secret_key
self.aws_session_token = cred.token
else:
log.debug("Checking environment for credentials")
self.aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID')
self.aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
self.aws_session_token = os.environ.get('AWS_SESSION_TOKEN') or os.environ.get('AWS_SECURITY_TOKEN')
# Last, fail if still not found
if self.aws_access_key_id is None or self.aws_secret_access_key is None:
raise KeyError("AWS Access Key ID and Secret Access Key are required")
# Get Region passed explicitly
self.region = kwargs.get('region')
# Next, try environment variables or use boto3
if self.region is None:
if USE_BOTO3:
self.region = session.region_name
log.debug("Got region from boto3 session")
else:
log.debug("Checking environment for region")
self.region = os.environ.get('AWS_DEFAULT_REGION')
# Last, fail if not found
if self.region is None:
raise KeyError("Region is required")
| (self, service, **kwargs) |
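Passing credentials explicitly skips both the boto3 and environment lookups shown above; the access key and secret below are the standard AWS documentation placeholders:

from requests_auth_aws_sigv4 import AWSSigV4

auth = AWSSigV4(
    "s3",
    region="eu-west-1",
    aws_access_key_id="AKIDEXAMPLE",
    aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
)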
7,350 | requests.auth | AuthBase | Base class that all auth implementations derive from | class AuthBase:
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError("Auth hooks must be callable.")
| () |
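A minimal AuthBase subclass only needs to implement __call__, mutate the prepared request, and return it; the bearer-token header below is just an illustrative choice:

from requests.auth import AuthBase

class TokenAuth(AuthBase):
    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        r.headers["Authorization"] = "Bearer " + self.token
        return r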
7,351 | requests.auth | __call__ | null | def __call__(self, r):
raise NotImplementedError("Auth hooks must be callable.")
| (self, r) |
7,359 | requests_auth_aws_sigv4 | sign_msg | Sign message using key | def sign_msg(key, msg):
''' Sign message using key '''
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
| (key, msg) |
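SigV4 derives its signing key with four chained sign_msg calls, mirroring Task 3 in AWSSigV4.__call__ above; the secret, date, region, and service here are placeholders:

from requests_auth_aws_sigv4 import sign_msg

secret = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"  # placeholder secret key
k_date = sign_msg(("AWS4" + secret).encode("utf-8"), "20240101")
k_region = sign_msg(k_date, "us-east-1")
k_service = sign_msg(k_region, "s3")
k_signing = sign_msg(k_service, "aws4_request")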