code
stringlengths 52
7.75k
| docs
stringlengths 1
5.85k
|
---|---|
def parse(self):
data = json.loads(sys.argv[1])
self.config_path = self.decode(data['config_path'])
self.subject = self.decode(data['subject'])
self.text = self.decode(data['text'])
self.html = self.decode(data['html'])
self.send_as_one = data['send_as_one']
if 'files' in data:
self.parse_files(data['files'])
self.ccs = data['ccs']
self.addresses = data['addresses']
if not self.addresses:
raise ValueError(
'Atleast one email address is required to send an email') | parses args json |
def construct_message(self, email=None):
# add subject, from and to
self.multipart['Subject'] = self.subject
self.multipart['From'] = self.config['EMAIL']
self.multipart['Date'] = formatdate(localtime=True)
if email is None and self.send_as_one:
self.multipart['To'] = ", ".join(self.addresses)
elif email is not None and self.send_as_one is False:
self.multipart['To'] = email
# add ccs
if self.ccs is not None and self.ccs:
self.multipart['Cc'] = ", ".join(self.ccs)
# add html and text body
html = MIMEText(self.html, 'html')
alt_text = MIMEText(self.text, 'plain')
self.multipart.attach(html)
self.multipart.attach(alt_text)
for file in self.files:
self.multipart.attach(file) | construct the email message |
def send(self, email=None):
if email is None and self.send_as_one:
self.smtp.send_message(
self.multipart, self.config['EMAIL'], self.addresses)
elif email is not None and self.send_as_one is False:
self.smtp.send_message(
self.multipart, self.config['EMAIL'], email)
self.multipart = MIMEMultipart('alternative') | send email message |
def create_email(self):
self.connect()
if self.send_as_one:
self.construct_message()
self.send()
elif self.send_as_one is False:
for email in self.addresses:
self.construct_message(email)
self.send(email)
self.disconnect() | main function to construct and send email |
def get_definition(query):
try:
return get_definition_api(query)
except:
raise
# http://api.wordnik.com:80/v4/word.json/discrimination/definitions?limit=200&includeRelated=true&sourceDictionaries=all&useCanonical=false&includeTags=false&api_key=a2a73e7b926c924fad7001ca3111acd55af2ffabf50eb4ae5
import json
payload = {'q': query, 'limit': 200, 'includeRelated': 'true', 'sourceDictionaries': 'all',
'useCanonical': 'false', 'includeTags': 'false',
'api_key': 'a2a73e7b926c924fad7001ca3111acd55af2ffabf50eb4ae5'}
url = 'http://api.wordnik.com:80/v4/word.json/%s/definitions' % query
r = requests.get(url, params=payload)
result = json.loads(r.text)
return result | Returns dictionary of id, first names of people who posted on my wall
between start and end time |
def intversion(text=None):
try:
s = text
if not s:
s = version()
s = s.split('ubuntu')[0]
s = s.split(':')[-1]
s = s.split('+')[0]
# <100
if s.startswith('00'):
i = int(s[0:4])
# >=100
elif '.' in s:
ls = s.split('.')
ls += [0, 0, 0]
i = int(ls[0]) * 100 + int(ls[1]) * 10 + int(ls[2])
except ValueError:
print ('Can not parse version: %s' % text)
raise
return i | return version as int.
0022 -> 22
0022ubuntu0.1 -> 22
0023 -> 23
1.0 -> 100
1.0.3 -> 103
1:1.0.5+dfsg2-2 -> 105 |
def set_current_session(session_id) -> bool:
try:
g.session_id = session_id
return True
except (Exception, BaseException) as error:
# catch all on config update
if current_app.config['DEBUG']:
print(error)
return False | Add session_id to flask globals for current request |
def create_session(cls, session_id, user_id):
count = SessionModel.count(user_id)
if count < current_app.config['AUTH']['MAX_SESSIONS']:
cls.__save_session(session_id, user_id)
return
elif count >= current_app.config['AUTH']['MAX_SESSIONS']:
earliest_session = SessionModel.where_earliest(user_id)
earliest_session.delete()
cls.__save_session(session_id, user_id)
return | Save a new session to the database
Using the ['AUTH']['MAX_SESSIONS'] config setting
a session with be created within the MAX_SESSIONS
limit. Once this limit is hit, delete the earliest
session. |
def visit_member(self, attribute_key, attribute, member_node, member_data,
is_link_node, parent_data, index=None):
raise NotImplementedError('Abstract method.') | Visits a member node in a resource data tree.
:param tuple attribute_key: tuple containing the attribute tokens
identifying the member node's position in the resource data tree.
:param attribute: mapped attribute holding information about the
member node's name (in the parent) and type etc.
:type attribute:
:class:`everest.representers.attributes.MappedAttribute`
:param member_node: the node holding resource data. This is either a
resource instance (when using a :class:`ResourceTreeTraverser` on
a tree of resources) or a data element instance (when using a
:class:`DataElementTreeTraverser` on a data element tree.
:param dict member_data: dictionary holding all member data
extracted during traversal (with mapped attributes as keys).
:param bool is_link_node: indicates if the given member node is a link.
:param dict parent_data: dictionary holding all parent data extracted
during traversal (with mapped attributes as keys).
:param int index: this indicates a member node's index in a collection
parent node. If the parent node is a member node, it will be `None`. |
def get_relationship(self, attribute):
rel = self.__relationships.get(attribute.entity_attr)
if rel is None:
rel = LazyDomainRelationship(self, attribute,
direction=
self.relationship_direction)
self.__relationships[attribute.entity_attr] = rel
return rel | Returns the domain relationship object for the given resource
attribute. |
def establish_rabbitmq_connection(rabbitmq_uri):
userid, password, host, port, virtual_host, ssl = translate_rabbitmq_url(rabbitmq_uri)
connection = Connection(userid=userid,
password=password,
host=host,
port=port,
virtual_host=virtual_host,
ssl=False)
return connection | What it says on the tin.
Input: - rabbitmq_uri: A RabbitMQ URI.
Output: - connection: A RabbitMQ connection. |
def simple_notification(connection, queue_name, exchange_name, routing_key, text_body):
channel = connection.channel()
try:
channel.queue_declare(queue_name, durable=True, exclusive=False, auto_delete=False)
except PreconditionFailed:
pass
try:
channel.exchange_declare(exchange_name, type="fanout", durable=True, auto_delete=False)
except PreconditionFailed:
pass
channel.queue_bind(queue_name, exchange_name, routing_key=routing_key)
message = Message(text_body)
channel.basic_publish(message, exchange_name, routing_key) | Publishes a simple notification.
Inputs: - connection: A rabbitmq connection object.
- queue_name: The name of the queue to be checked or created.
- exchange_name: The name of the notification exchange.
- routing_key: The routing key for the exchange-queue binding.
- text_body: The text to be published. |
def fmt_datetime(d, fmt='', local=True):
if not d:
return ''
if local:
from django.templatetags.tz import localtime
d = localtime(d)
if not fmt:
fmt = '%Y-%m-%d %H-%M'
return d.strftime(fmt) | format date with local support
``d``: datetime to format
``fmt``: format, default is '%Y-%m-%d %H-%M'
``local``: format as local time |
def get_url(self, url_or_dict):
if isinstance(url_or_dict, basestring):
url_or_dict = {'viewname': url_or_dict}
try:
return reverse(**url_or_dict)
except NoReverseMatch:
if MENU_DEBUG:
print >>stderr,'Unable to reverse URL with kwargs %s' % url_or_dict | Returns the reversed url given a string or dict and prints errors if MENU_DEBUG is enabled |
def get_time(self):
if isinstance(self.path, pathlib.Path):
thetime = self.path.stat().st_mtime
else:
thetime = np.nan
return thetime | Time of the TIFF file
Currently, only the file modification time is supported.
Note that the modification time of the TIFF file is
dependent on the file system and may have temporal
resolution as low as 3 seconds. |
def get_qpimage_raw(self, idx=0):
# Load experimental data
with SingleTifHolo._get_tif(self.path) as tf:
holo = tf.pages[0].asarray()
meta_data = copy.copy(self.meta_data)
qpi = qpimage.QPImage(data=(holo),
which_data="hologram",
meta_data=meta_data,
holo_kw=self.holo_kw,
h5dtype=self.as_type)
# set identifier
qpi["identifier"] = self.get_identifier()
qpi["time"] = self.get_time()
return qpi | Return QPImage without background correction |
def verify(path):
valid = False
try:
tf = SingleTifHolo._get_tif(path)
except (ValueError, IsADirectoryError):
pass
else:
if len(tf) == 1:
valid = True
return valid | Verify that `path` is a valid TIFF file |
async def songs(self):
'''list of songs in the playlist
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
items = []
for i in await self.items:
if i.type == 'Audio':
items.append(i)
elif hasattr(i, 'songs'):
items.extend(await i.songs)
return itemync def songs(self):
'''list of songs in the playlist
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
items = []
for i in await self.items:
if i.type == 'Audio':
items.append(i)
elif hasattr(i, 'songs'):
items.extend(await i.songs)
return items | list of songs in the playlist
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio` |
async def add_items(self, *items):
'''append items to the playlist
|coro|
Parameters
----------
items : array_like
list of items to add(or their ids)
See Also
--------
remove_items :
'''
items = [item.id for item in await self.process(items)]
if not items:
return
await self.connector.post('Playlists/{Id}/Items'.format(Id=self.id),
data={'Ids': ','.join(items)}, remote=False
ync def add_items(self, *items):
'''append items to the playlist
|coro|
Parameters
----------
items : array_like
list of items to add(or their ids)
See Also
--------
remove_items :
'''
items = [item.id for item in await self.process(items)]
if not items:
return
await self.connector.post('Playlists/{Id}/Items'.format(Id=self.id),
data={'Ids': ','.join(items)}, remote=False
) | append items to the playlist
|coro|
Parameters
----------
items : array_like
list of items to add(or their ids)
See Also
--------
remove_items : |
async def remove_items(self, *items):
'''remove items from the playlist
|coro|
Parameters
----------
items : array_like
list of items to remove(or their ids)
See Also
--------
add_items :
'''
items = [i.id for i in (await self.process(items)) if i in self.items]
if not items:
return
await self.connector.delete(
'Playlists/{Id}/Items'.format(Id=self.id),
EntryIds=','.join(items),
remote=False
ync def remove_items(self, *items):
'''remove items from the playlist
|coro|
Parameters
----------
items : array_like
list of items to remove(or their ids)
See Also
--------
add_items :
'''
items = [i.id for i in (await self.process(items)) if i in self.items]
if not items:
return
await self.connector.delete(
'Playlists/{Id}/Items'.format(Id=self.id),
EntryIds=','.join(items),
remote=False
) | remove items from the playlist
|coro|
Parameters
----------
items : array_like
list of items to remove(or their ids)
See Also
--------
add_items : |
async def movies(self):
'''list of movies in the collection
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
items = []
for i in await self.items:
if i.type == 'Movie':
items.append(i)
elif hasattr(i, 'movies'):
items.extend(await i.movies)
return itemync def movies(self):
'''list of movies in the collection
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
items = []
for i in await self.items:
if i.type == 'Movie':
items.append(i)
elif hasattr(i, 'movies'):
items.extend(await i.movies)
return items | list of movies in the collection
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie` |
async def series(self):
'''list of series in the collection
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
items = []
for i in await self.items:
if i.type == 'Series':
items.append(i)
elif hasattr(i, 'series'):
items.extend(await i.series)
return itemync def series(self):
'''list of series in the collection
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
items = []
for i in await self.items:
if i.type == 'Series':
items.append(i)
elif hasattr(i, 'series'):
items.extend(await i.series)
return items | list of series in the collection
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series` |
def save(self, *args, **kwargs):
if not self.target:
self.target = str(self.content_object)
if not self.actor_name:
self.actor_name = str(self.actor)
super(Activity, self).save() | Store a string representation of content_object as target
and actor name for fast retrieval and sorting. |
def short_action_string(self):
output = "{0} ".format(self.actor)
if self.override_string:
output += self.override_string
else:
output += self.verb
return output | Returns string with actor and verb, allowing target/object
to be filled in manually.
Example:
[actor] [verb] or
"Joe cool posted a comment" |
def image(self):
obj = self.content_object
# First, try to get from a get_image() helper method
try:
image = obj.get_image()
except AttributeError:
try:
image = obj.content_object.get_image()
except:
image = None
# if we didn't find one, try to get it from foo.image
# This allows get_image to take precedence for greater control.
if not image:
try:
image = obj.image
except AttributeError:
try:
image = obj.content_object.image
except:
return None
# Finally, ensure we're getting an image, not an image object
# with caption and byline and other things.
try:
return image.image
except AttributeError:
return image | Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped. |
def getversion(package):
distribution = get_distribution(package)
if distribution is None:
raise RuntimeError("Can't find distribution {0}".format(package))
repo_type = get_repo_type(distribution.location)
if repo_type is None:
return distribution.version
return repo_type.get_version(distribution.location) | Obtain the ``__version__`` for ``package``, looking at the egg information
or the source repository's version control if necessary. |
def version_from_frame(frame):
module = getmodule(frame)
if module is None:
s = "<unknown from {0}:{1}>"
return s.format(frame.f_code.co_filename, frame.f_lineno)
module_name = module.__name__
variable = "AUTOVERSION_{}".format(module_name.upper())
override = os.environ.get(variable, None)
if override is not None:
return override
while True:
try:
get_distribution(module_name)
except DistributionNotFound:
# Look at what's to the left of "."
module_name, dot, _ = module_name.partition(".")
if dot == "":
# There is no dot, nothing more we can do.
break
else:
return getversion(module_name)
return None | Given a ``frame``, obtain the version number of the module running there. |
def try_fix_num(n):
if not n.isdigit():
return n
if n.startswith("0"):
n = n.lstrip("0")
if not n:
n = "0"
return int(n) | Return ``n`` as an integer if it is numeric, otherwise return the input |
def tupleize_version(version):
if version is None:
return (("unknown",),)
if version.startswith("<unknown"):
return (("unknown",),)
split = re.split("(?:\.|(-))", version)
parsed = tuple(try_fix_num(x) for x in split if x)
# Put the tuples in groups by "-"
def is_dash(s):
return s == "-"
grouped = groupby(parsed, is_dash)
return tuple(tuple(group) for dash, group in grouped if not dash) | Split ``version`` into a lexicographically comparable tuple.
"1.0.3" -> ((1, 0, 3),)
"1.0.3-dev" -> ((1, 0, 3), ("dev",))
"1.0.3-rc-5" -> ((1, 0, 3), ("rc",), (5,)) |
def get_version(cls, path, memo={}):
if path not in memo:
memo[path] = subprocess.check_output(
"git describe --tags --dirty 2> /dev/null",
shell=True, cwd=path).strip().decode("utf-8")
v = re.search("-[0-9]+-", memo[path])
if v is not None:
# Replace -n- with -branchname-n-
branch = r"-{0}-\1-".format(cls.get_branch(path))
(memo[path], _) = re.subn("-([0-9]+)-", branch, memo[path], 1)
return memo[path] | Return a string describing the version of the repository at ``path`` if
possible, otherwise throws ``subprocess.CalledProcessError``.
(Note: memoizes the result in the ``memo`` parameter) |
def is_repo_instance(cls, path):
try:
cls.get_version(path)
return True
except subprocess.CalledProcessError:
# Git returns non-zero status
return False
except OSError:
# Git unavailable?
return False | Return ``True`` if ``path`` is a source controlled repository. |
def get_line(thing):
try:
return inspect.getsourcelines(thing)[1]
except TypeError:
# Might be a property
return inspect.getsourcelines(thing.fget)[1]
except Exception as e:
# print(thing)
raise e | Get the line number for something.
Parameters
----------
thing : function, class, module
Returns
-------
int
Line number in the source file |
def _sort_modules(mods):
def compare(x, y):
x = x[1]
y = y[1]
if x == y:
return 0
if y.stem == "__init__.py":
return 1
if x.stem == "__init__.py" or x < y:
return -1
return 1
return sorted(mods, key=cmp_to_key(compare)) | Always sort `index` or `README` as first filename in list. |
def refs_section(doc):
lines = []
if "References" in doc and len(doc["References"]) > 0:
# print("Found refs")
for ref in doc["References"]:
# print(ref)
ref_num = re.findall("\[([0-9]+)\]", ref)[0]
# print(ref_num)
ref_body = " ".join(ref.split(" ")[2:])
# print(f"[^{ref_num}] {ref_body}" + "\n")
lines.append(f"[^{ref_num}]: {ref_body}" + "\n\n")
# print(lines)
return lines | Generate a References section.
Parameters
----------
doc : dict
Dictionary produced by numpydoc
Returns
-------
list of str
Markdown for references section |
def examples_section(doc, header_level):
lines = []
if "Examples" in doc and len(doc["Examples"]) > 0:
lines.append(f"{'#'*(header_level+1)} Examples \n")
egs = "\n".join(doc["Examples"])
lines += mangle_examples(doc["Examples"])
return lines | Generate markdown for Examples section.
Parameters
----------
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section |
def summary(doc):
lines = []
if "Summary" in doc and len(doc["Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Summary"])))
lines.append("\n")
if "Extended Summary" in doc and len(doc["Extended Summary"]) > 0:
lines.append(fix_footnotes(" ".join(doc["Extended Summary"])))
lines.append("\n")
return lines | Generate markdown for summary section.
Parameters
----------
doc : dict
Output from numpydoc
Returns
-------
list of str
Markdown strings |
def params_section(thing, doc, header_level):
lines = []
class_doc = doc["Parameters"]
return type_list(
inspect.signature(thing),
class_doc,
"#" * (header_level + 1) + " Parameters\n\n",
) | Generate markdown for Parameters section.
Parameters
----------
thing : functuon
Function to produce parameters from
doc : dict
Dict from numpydoc
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown for examples section |
def get_source_link(thing, source_location):
try:
lineno = get_line(thing)
try:
owner_module = inspect.getmodule(thing)
assert owner_module is not None
except (TypeError, AssertionError):
owner_module = inspect.getmodule(thing.fget)
thing_file = "/".join(owner_module.__name__.split("."))
if owner_module.__file__.endswith("__init__.py"):
thing_file += "/__init__.py"
else:
thing_file += ".py"
return (
f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
+ "\n\n"
)
except Exception as e:
# print("Failed to find source file.")
# print(e)
# print(lineno)
# print(thing)
# print(owner_module)
# print(thing_file)
# print(source_location)
pass
return "" | Get a link to the line number a module/class/function is defined at.
Parameters
----------
thing : function or class
Thing to get the link for
source_location : str
GitHub url of the source code
Returns
-------
str
String with link to the file & line number, or empty string if it
couldn't be found |
def get_signature(name, thing):
if inspect.ismodule(thing):
return ""
if isinstance(thing, property):
func_sig = name
else:
try:
sig = inspect.signature(thing)
except TypeError:
sig = inspect.signature(thing.fget)
except ValueError:
return ""
func_sig = f"{name}{sig}"
try:
mode = black.FileMode(line_length=80)
func_sig = black.format_str(func_sig, mode=mode).strip()
except (ValueError, TypeError):
pass
return f"```python\n{func_sig}\n```\n" | Get the signature for a function or class, formatted nicely if possible.
Parameters
----------
name : str
Name of the thing, used as the first part of the signature
thing : class or function
Thing to get the signature of |
def _get_names(names, types):
if types == "":
try:
names, types = names.split(":")
except:
pass
return names.split(","), types | Get names, bearing in mind that there might be no name,
no type, and that the `:` separator might be wrongly used. |
def string_annotation(typ, default):
try:
type_string = (
f"`{typ.__name__}`"
if typ.__module__ == "builtins"
else f"`{typ.__module__}.{typ.__name__}`"
)
except AttributeError:
type_string = f"`{str(typ)}`"
if default is None:
type_string = f"{type_string}, default ``None``"
elif default == inspect._empty:
pass
else:
type_string = f"{type_string}, default ``{default}``"
return type_string | Construct a string representation of a type annotation.
Parameters
----------
typ : type
Type to turn into a string
default : any
Default value (if any) of the type
Returns
-------
str
String version of the type annotation |
def type_list(signature, doc, header):
lines = []
docced = set()
lines.append(header)
try:
for names, types, description in doc:
names, types = _get_names(names, types)
unannotated = []
for name in names:
docced.add(name)
try:
typ = signature.parameters[name].annotation
if typ == inspect._empty:
raise AttributeError
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
unannotated.append(name) # No annotation
if len(unannotated) > 0:
lines.append("- ")
lines.append(", ".join(f"`{name}`" for name in unannotated))
if types != "" and len(unannotated) > 0:
lines.append(f": {mangle_types(types)}")
lines.append("\n\n")
lines.append(f" {' '.join(description)}\n\n")
for names, types, description in doc:
names, types = _get_names(names, types)
for name in names:
if name not in docced:
try:
typ = signature.parameters[name].annotation
default = signature.parameters[name].default
type_string = string_annotation(typ, default)
lines.append(f"- `{name}`: {type_string}")
lines.append("\n\n")
except (AttributeError, KeyError):
lines.append(f"- `{name}`")
lines.append("\n\n")
except Exception as e:
print(e)
return lines if len(lines) > 1 else [] | Construct a list of types, preferring type annotations to
docstrings if they are available.
Parameters
----------
signature : Signature
Signature of thing
doc : list of tuple
Numpydoc's type list section
Returns
-------
list of str
Markdown formatted type list |
def _split_props(thing, doc):
props = inspect.getmembers(thing, lambda o: isinstance(o, property))
ps = []
docs = [
(*_get_names(names, types), names, types, desc) for names, types, desc in doc
]
for prop_name, prop in props:
in_doc = [d for d in enumerate(docs) if prop_name in d[0]]
for d in in_doc:
docs.remove(d)
ps.append(prop_name)
if len(docs) > 0:
_, _, names, types, descs = zip(*docs)
return ps, zip(names, types, descs)
return ps, [] | Separate properties from other kinds of member. |
def attributes_section(thing, doc, header_level):
# Get Attributes
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
if len(tl) == 0 and len(props) > 0:
tl.append("\n### Attributes\n\n")
for prop in props:
tl.append(f"- [`{prop}`](#{prop})\n\n")
return tl | Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list |
def enum_doc(name, enum, header_level, source_location):
lines = [f"{'#'*header_level} Enum **{name}**\n\n"]
lines.append(f"```python\n{name}\n```\n")
lines.append(get_source_link(enum, source_location))
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
except:
pass
lines.append(f"{'#'*(header_level + 1)} Members\n\n")
lines += [f"- `{str(v).split('.').pop()}`: `{v.value}` \n\n" for v in enum]
return lines | Generate markdown for an enum
Parameters
----------
name : str
Name of the thing being documented
enum : EnumMeta
Enum to document
header_level : int
Heading level
source_location : str
URL of repo containing source code |
def to_doc(name, thing, header_level, source_location):
if type(thing) is enum.EnumMeta:
return enum_doc(name, thing, header_level, source_location)
if inspect.isclass(thing):
header = f"{'#'*header_level} Class **{name}**\n\n"
else:
header = f"{'#'*header_level} {name}\n\n"
lines = [
header,
get_signature(name, thing),
get_source_link(thing, source_location),
]
try:
doc = NumpyDocString(inspect.getdoc(thing))._parsed_data
lines += summary(doc)
lines += attributes_section(thing, doc, header_level)
lines += params_section(thing, doc, header_level)
lines += returns_section(thing, doc, header_level)
lines += examples_section(doc, header_level)
lines += notes_section(doc)
lines += refs_section(doc)
except Exception as e:
# print(f"No docstring for {name}, src {source_location}: {e}")
pass
return lines | Generate markdown for a class or function
Parameters
----------
name : str
Name of the thing being documented
thing : class or function
Class or function to document
header_level : int
Heading level
source_location : str
URL of repo containing source code |
def doc_module(module_name, module, output_dir, source_location, leaf):
path = pathlib.Path(output_dir).joinpath(*module.__name__.split("."))
available_classes = get_available_classes(module)
deffed_classes = get_classes(module)
deffed_funcs = get_funcs(module)
deffed_enums = get_enums(module)
alias_funcs = available_classes - deffed_classes
if leaf:
doc_path = path.with_suffix(".md")
else:
doc_path = path / "index.md"
doc_path.parent.mkdir(parents=True, exist_ok=True)
module_path = "/".join(module.__name__.split("."))
doc = [f"title: {module_name.split('.')[-1]}" + "\n"]
module_doc = module.__doc__
# Module overview documentation
if module_doc is not None:
doc += to_doc(module.__name__, module, 1, source_location)
else:
doc.append(f"# {module.__name__}\n\n")
doc.append("\n\n")
for cls_name, cls in sorted(deffed_enums) + sorted(deffed_classes):
doc += to_doc(cls_name, cls, 2, source_location)
class_methods = [
x
for x in inspect.getmembers(cls, inspect.isfunction)
if (not x[0].startswith("_")) and deffed_here(x[1], cls)
]
class_methods += inspect.getmembers(cls, lambda o: isinstance(o, property))
if len(class_methods) > 0:
doc.append("### Methods \n\n")
for method_name, method in class_methods:
doc += to_doc(method_name, method, 4, source_location)
for fname, func in sorted(deffed_funcs):
doc += to_doc(fname, func, 2, source_location)
return doc_path.absolute(), "".join(doc) | Document a module
Parameters
----------
module_name : str
module : module
output_dir : str
source_location : str
leaf : bool |
def set_color(fg=None, bg=None):
if fg or bg:
_color_manager.set_color(fg, bg)
else:
_color_manager.set_defaults() | Set the current colors.
If no arguments are given, sets default colors. |
def cprint(string, fg=None, bg=None, end='\n', target=sys.stdout):
_color_manager.set_color(fg, bg)
target.write(string + end)
target.flush() # Needed for Python 3.x
_color_manager.set_defaults() | Print a colored string to the target handle.
fg and bg specify foreground- and background colors, respectively. The
remaining keyword arguments are the same as for Python's built-in print
function. Colors are returned to their defaults before the function
returns. |
def fprint(fmt, *args, **kwargs):
if not fmt:
return
hascolor = False
target = kwargs.get("target", sys.stdout)
# Format the string before feeding it to the parser
fmt = fmt.format(*args, **kwargs)
for txt, markups in _color_format_parser.parse(fmt):
if markups != (None, None):
_color_manager.set_color(*markups)
hascolor = True
else:
if hascolor:
_color_manager.set_defaults()
hascolor = False
target.write(txt)
target.flush() # Needed for Python 3.x
_color_manager.set_defaults()
target.write(kwargs.get('end', '\n'))
_color_manager.set_defaults() | Parse and print a colored and perhaps formatted string.
The remaining keyword arguments are the same as for Python's built-in print
function. Colors are returning to their defaults before the function
returns. |
def formatcolor(string, fg=None, bg=None):
if fg is bg is None:
return string
temp = (['fg='+fg] if fg else []) +\
(['bg='+bg] if bg else [])
fmt = _color_format_parser._COLOR_DELIM.join(temp)
return _color_format_parser._START_TOKEN + fmt +\
_color_format_parser._FMT_TOKEN + string +\
_color_format_parser._STOP_TOKEN | Wrap color syntax around a string and return it.
fg and bg specify foreground- and background colors, respectively. |
def formatbyindex(string, fg=None, bg=None, indices=[]):
if not string or not indices or (fg is bg is None):
return string
result, p = '', 0
# The lambda syntax is necessary to support both Python 2 and 3
for k, g in itertools.groupby(enumerate(sorted(indices)),
lambda x: x[0]-x[1]):
tmp = list(map(operator.itemgetter(1), g))
s, e = tmp[0], tmp[-1]+1
if s < len(string):
result += string[p:s]
result += formatcolor(string[s:e], fg, bg)
p = e
if p < len(string):
result += string[p:]
return result | Wrap color syntax around characters using indices and return it.
fg and bg specify foreground- and background colors, respectively. |
def highlight(string, fg=None, bg=None, indices=[], end='\n',
target=sys.stdout):
if not string or not indices or (fg is bg is None):
return
p = 0
# The lambda syntax is necessary to support both Python 2 and 3
for k, g in itertools.groupby(enumerate(sorted(indices)),
lambda x: x[0]-x[1]):
tmp = list(map(operator.itemgetter(1), g))
s, e = tmp[0], tmp[-1]+1
target.write(string[p:s])
target.flush() # Needed for Python 3.x
_color_manager.set_color(fg, bg)
target.write(string[s:e])
target.flush() # Needed for Python 3.x
_color_manager.set_defaults()
p = e
if p < len(string):
target.write(string[p:])
target.write(end) | Highlight characters using indices and print it to the target handle.
fg and bg specify foreground- and background colors, respectively. The
remaining keyword arguments are the same as for Python's built-in print
function. |
def create_update_parameter(task_params, parameter_map):
gp_params = []
for param in task_params:
if param['direction'].upper() == 'OUTPUT':
continue
# Convert DataType
data_type = param['type'].upper()
if 'dimensions' in param:
data_type += 'ARRAY'
if data_type in parameter_map:
gp_params.append(parameter_map[data_type].update_parameter().substitute(param))
return ''.join(gp_params) | Builds the code block for the GPTool UpdateParameter method based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool UpdateParameter method. |
def create_pre_execute(task_params, parameter_map):
gp_params = [_PRE_EXECUTE_INIT_TEMPLATE]
for task_param in task_params:
if task_param['direction'].upper() == 'OUTPUT':
continue
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
data_type += 'ARRAY'
if data_type in parameter_map:
gp_params.append(parameter_map[data_type].pre_execute().substitute(task_param))
gp_params.append(_PRE_EXECUTE_CLEANUP_TEMPLATE)
return ''.join(gp_params) | Builds the code block for the GPTool Execute method before the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method. |
def create_post_execute(task_params, parameter_map):
gp_params = []
for task_param in task_params:
if task_param['direction'].upper() == 'INPUT':
continue
# Convert DataType
data_type = task_param['type'].upper()
if 'dimensions' in task_param:
data_type += 'ARRAY'
if data_type in parameter_map:
gp_params.append(parameter_map[data_type].post_execute().substitute(task_param))
return ''.join(gp_params) | Builds the code block for the GPTool Execute method after the job is
submitted based on the input task_params.
:param task_params: A list of task parameters from the task info structure.
:return: A string representing the code block to the GPTool Execute method. |
def load_default_templates(self):
for importer, modname, is_pkg in pkgutil.iter_modules(templates.__path__):
self.register_template('.'.join((templates.__name__, modname))) | Load the default templates |
def register_template(self, module):
parameter_module = importlib.import_module(module)
parameter_template = parameter_module.template()
self.parameter_map[parameter_template.__class__.__name__.upper()] = \
parameter_template | Register a non-default template
:param module: The full package path including the module name
of the template to load. |
def main():
args = parser.parse_args()
scheduler = Scheduler()
print 'Start %s' % scheduler.scheduler_id
scheduler.interval = args.interval
if args.keepalive:
scheduler.run(once=True)
keepalive = Job('simplescheduler.keepalive',
args=[0,
scheduler.get_running_scheduler_id(),
args.interval * 2])
scheduler.schedule(keepalive, long(time.time() * 1000000))
scheduler._run() | SimpleScheduler
redis parameters will be read from environment variables:
REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_KEY (password) |
def remove_hwpack(name):
targ_dlib = hwpack_dir() / name
log.debug('remove %s', targ_dlib)
targ_dlib.rmtree() | remove hardware package.
:param name: hardware package name (e.g. 'Sanguino')
:rtype: None |
def finalize(self):
super(StatisticsConsumer, self).finalize()
# run statistics on timewave slice w at grid point g
# self.result = [(g, self.statistics(w)) for g, w in zip(self.grid, self.result)]
# self.result = zip(self.grid, (self.statistics(w) for w in self.result))
self.result = zip(self.grid, map(self.statistics, self.result)) | finalize for StatisticsConsumer |
def finalize(self):
super(StochasticProcessStatisticsConsumer, self).finalize()
class StochasticProcessStatistics(self.statistics):
"""local version to store statistics"""
def __str__(self):
s = [k.rjust(12) + str(getattr(self, k)) for k in dir(self) if not k.startswith('_')]
return '\n'.join(s)
sps = StochasticProcessStatistics([0, 0])
keys = list()
for k in dir(sps):
if not k.startswith('_'):
a = getattr(sps, k)
if isinstance(a, (int, float, str)):
keys.append(k)
else:
delattr(sps, k)
for k in keys:
setattr(sps, k, list())
grid = list()
for g, r in self.result:
grid.append(g)
for k in keys:
a = getattr(sps, k)
a.append(getattr(r, k))
self.result = grid, sps | finalize for StochasticProcessStatisticsConsumer |
def setup_keyword(dist, _, value):
# type: (setuptools.dist.Distribution, str, bool) -> None
if value is not True:
return
dist.entry_points = _ensure_entry_points_is_dict(dist.entry_points)
for command, subcommands in six.iteritems(_get_commands(dist)):
entry_point = '{command} = rcli.dispatcher:main'.format(
command=command)
entry_points = dist.entry_points.setdefault('console_scripts', [])
if entry_point not in entry_points:
entry_points.append(entry_point)
dist.entry_points.setdefault('rcli', []).extend(subcommands) | Add autodetected commands as entry points.
Args:
dist: The distutils Distribution object for the project being
installed.
_: The keyword used in the setup function. Unused.
value: The value set to the keyword in the setup function. If the value
is not True, this function will do nothing. |
def egg_info_writer(cmd, basename, filename):
# type: (setuptools.command.egg_info.egg_info, str, str) -> None
setupcfg = next((f for f in setuptools.findall()
if os.path.basename(f) == 'setup.cfg'), None)
if not setupcfg:
return
parser = six.moves.configparser.ConfigParser() # type: ignore
parser.read(setupcfg)
if not parser.has_section('rcli') or not parser.items('rcli'):
return
config = dict(parser.items('rcli')) # type: typing.Dict[str, typing.Any]
for k, v in six.iteritems(config):
if v.lower() in ('y', 'yes', 'true'):
config[k] = True
elif v.lower() in ('n', 'no', 'false'):
config[k] = False
else:
try:
config[k] = json.loads(v)
except ValueError:
pass
cmd.write_file(basename, filename, json.dumps(config)) | Read rcli configuration and write it out to the egg info.
Args:
cmd: An egg info command instance to use for writing.
basename: The basename of the file to write.
filename: The full path of the file to write into the egg info. |
def _get_commands(dist # type: setuptools.dist.Distribution
):
# type: (...) -> typing.Dict[str, typing.Set[str]]
py_files = (f for f in setuptools.findall()
if os.path.splitext(f)[1].lower() == '.py')
pkg_files = (f for f in py_files if _get_package_name(f) in dist.packages)
commands = {} # type: typing.Dict[str, typing.Set[str]]
for file_name in pkg_files:
with open(file_name) as py_file:
module = typing.cast(ast.Module, ast.parse(py_file.read()))
module_name = _get_module_name(file_name)
_append_commands(commands, module_name, _get_module_commands(module))
_append_commands(commands, module_name, _get_class_commands(module))
_append_commands(commands, module_name, _get_function_commands(module))
return commands | Find all commands belonging to the given distribution.
Args:
dist: The Distribution to search for docopt-compatible docstrings that
can be used to generate command entry points.
Returns:
A dictionary containing a mapping of primary commands to sets of
subcommands. |
def _append_commands(dct, # type: typing.Dict[str, typing.Set[str]]
module_name, # type: str
commands # type:typing.Iterable[_EntryPoint]
):
# type: (...) -> None
for command in commands:
entry_point = '{command}{subcommand} = {module}{callable}'.format(
command=command.command,
subcommand=(':{}'.format(command.subcommand)
if command.subcommand else ''),
module=module_name,
callable=(':{}'.format(command.callable)
if command.callable else ''),
)
dct.setdefault(command.command, set()).add(entry_point) | Append entry point strings representing the given Command objects.
Args:
dct: The dictionary to append with entry point strings. Each key will
be a primary command with a value containing a list of entry point
strings representing a Command.
module_name: The name of the module in which the command object
resides.
commands: A list of Command objects to convert to entry point strings. |
def _get_module_commands(module):
# type: (ast.Module) -> typing.Generator[_EntryPoint, None, None]
cls = next((n for n in module.body
if isinstance(n, ast.ClassDef) and n.name == 'Command'), None)
if not cls:
return
methods = (n.name for n in cls.body if isinstance(n, ast.FunctionDef))
if '__call__' not in methods:
return
docstring = ast.get_docstring(module)
for commands, _ in usage.parse_commands(docstring):
yield _EntryPoint(commands[0], next(iter(commands[1:]), None), None) | Yield all Command objects represented by the python module.
Module commands consist of a docopt-style module docstring and a callable
Command class.
Args:
module: An ast.Module object used to retrieve docopt-style commands.
Yields:
Command objects that represent entry points to append to setup.py. |
def _get_function_commands(module):
# type: (ast.Module) -> typing.Generator[_EntryPoint, None, None]
nodes = (n for n in module.body if isinstance(n, ast.FunctionDef))
for func in nodes:
docstring = ast.get_docstring(func)
for commands, _ in usage.parse_commands(docstring):
yield _EntryPoint(commands[0], next(iter(commands[1:]), None),
func.name) | Yield all Command objects represented by python functions in the module.
Function commands consist of all top-level functions that contain
docopt-style docstrings.
Args:
module: An ast.Module object used to retrieve docopt-style commands.
Yields:
Command objects that represent entry points to append to setup.py. |
def convert(b):
'''
takes a number of bytes as an argument and returns the most suitable human
readable unit conversion.
'''
if b > 1024**3:
hr = round(b/1024**3)
unit = "GB"
elif b > 1024**2:
hr = round(b/1024**2)
unit = "MB"
else:
hr = round(b/1024)
unit = "KB"
return hr, unif convert(b):
'''
takes a number of bytes as an argument and returns the most suitable human
readable unit conversion.
'''
if b > 1024**3:
hr = round(b/1024**3)
unit = "GB"
elif b > 1024**2:
hr = round(b/1024**2)
unit = "MB"
else:
hr = round(b/1024)
unit = "KB"
return hr, unit | takes a number of bytes as an argument and returns the most suitable human
readable unit conversion. |
def calc(path):
'''
Takes a path as an argument and returns the total size in bytes of the file
or directory. If the path is a directory the size will be calculated
recursively.
'''
total = 0
err = None
if os.path.isdir(path):
try:
for entry in os.scandir(path):
try:
is_dir = entry.is_dir(follow_symlinks=False)
except (PermissionError, FileNotFoundError):
err = "!"
return total, err
if is_dir:
result = calc(entry.path)
total += result[0]
err = result[1]
if err:
return total, err
else:
try:
total += entry.stat(follow_symlinks=False).st_size
except (PermissionError, FileNotFoundError):
err = "!"
return total, err
except (PermissionError, FileNotFoundError):
err = "!"
return total, err
else:
total += os.path.getsize(path)
return total, erf calc(path):
'''
Takes a path as an argument and returns the total size in bytes of the file
or directory. If the path is a directory the size will be calculated
recursively.
'''
total = 0
err = None
if os.path.isdir(path):
try:
for entry in os.scandir(path):
try:
is_dir = entry.is_dir(follow_symlinks=False)
except (PermissionError, FileNotFoundError):
err = "!"
return total, err
if is_dir:
result = calc(entry.path)
total += result[0]
err = result[1]
if err:
return total, err
else:
try:
total += entry.stat(follow_symlinks=False).st_size
except (PermissionError, FileNotFoundError):
err = "!"
return total, err
except (PermissionError, FileNotFoundError):
err = "!"
return total, err
else:
total += os.path.getsize(path)
return total, err | Takes a path as an argument and returns the total size in bytes of the file
or directory. If the path is a directory the size will be calculated
recursively. |
def du(path):
'''
Put it all together!
'''
size, err = calc(path)
if err:
return err
else:
hr, unit = convert(size)
hr = str(hr)
result = hr + " " + unit
return resulf du(path):
'''
Put it all together!
'''
size, err = calc(path)
if err:
return err
else:
hr, unit = convert(size)
hr = str(hr)
result = hr + " " + unit
return result | Put it all together! |
def handle(self, **kwargs):
for item in settings.ACTIVITY_MONITOR_MODELS:
app_label, model = item['model'].split('.', 1)
content_type = ContentType.objects.get(app_label=app_label, model=model)
model = content_type.model_class()
objects = model.objects.all()
for object in objects:
try:
object.save()
except Exception as e:
print("Error saving: {}".format(e)) | Simply re-saves all objects from models listed in settings.TIMELINE_MODELS. Since the timeline
app is now following these models, it will register each item as it is re-saved. The purpose of this
script is to register content in your database that existed prior to installing the timeline app. |
def album_primary_image_url(self):
'''The image of the album'''
path = '/Items/{}/Images/Primary'.format(self.album_id)
return self.connector.get_url(path, attach_api_key=Falsef album_primary_image_url(self):
'''The image of the album'''
path = '/Items/{}/Images/Primary'.format(self.album_id)
return self.connector.get_url(path, attach_api_key=False) | The image of the album |
def stream_url(self):
'''stream for this song - not re-encoded'''
path = '/Audio/{}/universal'.format(self.id)
return self.connector.get_url(path,
userId=self.connector.userid,
MaxStreamingBitrate=140000000,
Container='opus',
TranscodingContainer='opus',
AudioCodec='opus',
MaxSampleRate=48000,
PlaySessionId=1496213367201 #TODO no hard code
f stream_url(self):
'''stream for this song - not re-encoded'''
path = '/Audio/{}/universal'.format(self.id)
return self.connector.get_url(path,
userId=self.connector.userid,
MaxStreamingBitrate=140000000,
Container='opus',
TranscodingContainer='opus',
AudioCodec='opus',
MaxSampleRate=48000,
PlaySessionId=1496213367201 #TODO no hard code
) | stream for this song - not re-encoded |
def data(self):
if self._data is None:
request = urllib.Request(self._url, headers={'User-Agent': USER_AGENT})
with contextlib.closing(self._connection.urlopen(request)) as response:
self._data = response.read()
return self._data | raw image data |
def make_filter_string(cls, filter_specification):
registry = get_current_registry()
visitor_cls = registry.getUtility(IFilterSpecificationVisitor,
name=EXPRESSION_KINDS.CQL)
visitor = visitor_cls()
filter_specification.accept(visitor)
return str(visitor.expression) | Converts the given filter specification to a CQL filter expression. |
def make_order_string(cls, order_specification):
registry = get_current_registry()
visitor_cls = registry.getUtility(IOrderSpecificationVisitor,
name=EXPRESSION_KINDS.CQL)
visitor = visitor_cls()
order_specification.accept(visitor)
return str(visitor.expression) | Converts the given order specification to a CQL order expression. |
def make_slice_key(cls, start_string, size_string):
try:
start = int(start_string)
except ValueError:
raise ValueError('Query parameter "start" must be a number.')
if start < 0:
raise ValueError('Query parameter "start" must be zero or '
'a positive number.')
try:
size = int(size_string)
except ValueError:
raise ValueError('Query parameter "size" must be a number.')
if size < 1:
raise ValueError('Query parameter "size" must be a positive '
'number.')
return slice(start, start + size) | Converts the given start and size query parts to a slice key.
:return: slice key
:rtype: slice |
def make_slice_strings(cls, slice_key):
start = slice_key.start
size = slice_key.stop - start
return (str(start), str(size)) | Converts the given slice key to start and size query parts. |
def match(self, expression=None, xpath=None, namespaces=None):
class MatchObject(Dict):
pass
def _match(function):
self.matches.append(
MatchObject(expression=expression, xpath=xpath, function=function, namespaces=namespaces))
def wrapper(self, *args, **params):
return function(self, *args, **params)
return wrapper
return _match | decorator that allows us to match by expression or by xpath for each transformation method |
def get_match(self, elem):
for m in self.matches:
if (m.expression is not None and eval(m.expression)==True) \
or (m.xpath is not None and len(elem.xpath(m.xpath, namespaces=m.namespaces)) > 0):
LOG.debug("=> match: %r" % m.expression)
return m | for the given elem, return the @match function that will be applied |
def Element(self, elem, **params):
res = self.__call__(deepcopy(elem), **params)
if len(res) > 0:
return res[0]
else:
return None | Ensure that the input element is immutable by the transformation. Returns a single element. |
def read_properties(filename):
s = path(filename).text()
dummy_section = 'xxx'
cfgparser = configparser.RawConfigParser()
# avoid converting options to lower case
cfgparser.optionxform = str
cfgparser.readfp(StringIO('[%s]\n' % dummy_section + s))
bunch = AutoBunch()
for x in cfgparser.options(dummy_section):
setattr(bunch, x, cfgparser.get(dummy_section, str(x)))
return bunch | read properties file into bunch.
:param filename: string
:rtype: bunch (dict like and object like) |
def clean_dir(root):
'''remove .* and _* files and directories under root'''
for x in root.walkdirs('.*', errors='ignore'):
x.rmtree()
for x in root.walkdirs('_*', errors='ignore'):
x.rmtree()
for x in root.walkfiles('.*', errors='ignore'):
x.remove()
for x in root.walkfiles('_*', errors='ignore'):
x.remove(f clean_dir(root):
'''remove .* and _* files and directories under root'''
for x in root.walkdirs('.*', errors='ignore'):
x.rmtree()
for x in root.walkdirs('_*', errors='ignore'):
x.rmtree()
for x in root.walkfiles('.*', errors='ignore'):
x.remove()
for x in root.walkfiles('_*', errors='ignore'):
x.remove() | remove .* and _* files and directories under root |
def create_request_url(self, interface, method, version, parameters):
if 'format' in parameters:
parameters['key'] = self.apikey
else:
parameters.update({'key' : self.apikey, 'format' : self.format})
version = "v%04d" % (version)
url = "http://api.steampowered.com/%s/%s/%s/?%s" % (interface, method,
version, urlencode(parameters))
return url | Create the URL to submit to the Steam Web API
interface: Steam Web API interface containing methods.
method: The method to call.
version: The version of the method.
paramters: Parameters to supply to the method. |
def retrieve_request(self, url):
try:
data = urlopen(url)
except:
print("Error Retrieving Data from Steam")
sys.exit(2)
return data.read().decode('utf-8') | Open the given url and decode and return the response
url: The url to open. |
def return_data(self, data, format=None):
if format is None:
format = self.format
if format == "json":
formatted_data = json.loads(data)
else:
formatted_data = data
return formatted_data | Format and return data appropriate to the requested API format.
data: The data retured by the api request |
def get_friends_list(self, steamID, relationship='all', format=None):
parameters = {'steamid' : steamID, 'relationship' : relationship}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetFriendsList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the friends list of a given steam ID filtered by role.
steamID: The user ID
relationship: Type of friend to request (all, friend)
format: Return format. None defaults to json. (json, xml, vdf) |
def get_player_bans(self, steamIDS, format=None):
parameters = {'steamids' : steamIDS}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetPlayerBans', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the communities a steam id is banned in.
steamIDS: Comma-delimited list of SteamIDs
format: Return format. None defaults to json. (json, xml, vdf) |
def get_user_group_list(self, steamID, format=None):
parameters = {'steamid' : steamID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserGroupList', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of groups a user is subscribed to.
steamID: User ID
format: Return format. None defaults to json. (json, xml, vdf) |
def resolve_vanity_url(self, vanityURL, url_type=1, format=None):
parameters = {'vanityurl' : vanityURL, "url_type" : url_type}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'ResolveVanityUrl', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the steam id associated with a vanity url.
vanityURL: The users vanity URL
url_type: The type of vanity URL. 1 (default): Individual profile,
2: Group, 3: Official game group
format: Return format. None defaults to json. (json, xml, vdf) |
def get_global_achievement_percentages_for_app(self, gameID, format=None):
parameters = {'gameid' : gameID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetGlobalAchievementPercentagesForApp', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request statistics showing global achievements that have been
unlocked.
gameID: The id of the game.
format: Return format. None defaults to json. (json, xml, vdf) |
def get_global_stats_for_game(self, appID, count, names, startdate,
enddate, format=None):
parameters = {
'appid' : appID,
'count' : count,
'startdate' : startdate,
'enddate' : enddate
}
count = 0
for name in names:
param = "name[" + str(count) + "]"
parameters[param] = name
count += 1
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetGlobalStatsForGame', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request global stats for a give game.
appID: The app ID
count: Number of stats to get.
names: A list of names of stats to get.
startdate: The start time to gather stats. Unix timestamp
enddate: The end time to gather stats. Unix timestamp
format: Return format. None defaults to json. (json, xml, vdf) |
def get_number_of_current_players(self, appID, format=None):
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface,
'GetNumberOfCurrentPlayers', 1, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the current number of players for a given app.
appID: The app ID
format: Return format. None defaults to json. (json, xml, vdf) |
def get_player_achievements(self, steamID, appID, language=None,
format=None):
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetPlayerAchievements', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the achievements for a given app and steam id.
steamID: Users steam ID
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf) |
def get_schema_for_game(self, appID, language=None, format=None):
parameters = {'appid' : appID}
if format is not None:
parameters['format'] = format
if language is not None:
parameters['l'] = language
else:
parameters['l'] = self.language
url = self.create_request_url(self.interface, 'GetSchemaForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the available achievements and stats for a game.
appID: The app id
language: The language to return the results in. None uses default.
format: Return format. None defaults to json. (json, xml, vdf) |
def get_user_stats_for_game(self, steamID, appID, format=None):
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request the user stats for a given game.
steamID: The users ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf) |
def get_recently_played_games(self, steamID, count=0, format=None):
parameters = {'steamid' : steamID, 'count' : count}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetRecentlyPlayedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of recently played games by a given steam id.
steamID: The users ID
count: Number of games to return. (0 is all recent games.)
format: Return format. None defaults to json. (json, xml, vdf) |
def get_owned_games(self, steamID, include_appinfo=1,
include_played_free_games=0, appids_filter=None, format=None):
parameters = {
'steamid' : steamID,
'include_appinfo' : include_appinfo,
'include_played_free_games' : include_played_free_games
}
if format is not None:
parameters['format'] = format
if appids_filter is not None:
parameters['appids_filter'] = appids_filter
url = self.create_request_url(self.interface, 'GetOwnedGames', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Request a list of games owned by a given steam id.
steamID: The users id
include_appinfo: boolean.
include_played_free_games: boolean.
appids_filter: a json encoded list of app ids.
format: Return format. None defaults to json. (json, xml, vdf) |
def get_community_badge_progress(self, steamID, badgeID, format=None):
parameters = {'steamid' : steamID, 'badgeid' : badgeID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetCommunityBadgeProgress', 1,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format) | Gets all the quests needed to get the specified badge, and which are completed.
steamID: The users ID
badgeID: The badge we're asking about
format: Return format. None defaults to json. (json, xml, vdf) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.