id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
---|---|---|---|---|---|---|---|---|---|---|---|
248,700 | dossier/dossier.web | dossier/web/label_folders.py | nub | def nub(it):
'''Dedups an iterable in arbitrary order.
Uses memory proportional to the number of unique items in ``it``.
'''
seen = set()
for v in it:
h = hash(v)
if h in seen:
continue
seen.add(h)
yield v | python | def nub(it):
'''Dedups an iterable in arbitrary order.
Uses memory proportional to the number of unique items in ``it``.
'''
seen = set()
for v in it:
h = hash(v)
if h in seen:
continue
seen.add(h)
yield v | [
"def",
"nub",
"(",
"it",
")",
":",
"seen",
"=",
"set",
"(",
")",
"for",
"v",
"in",
"it",
":",
"h",
"=",
"hash",
"(",
"v",
")",
"if",
"h",
"in",
"seen",
":",
"continue",
"seen",
".",
"add",
"(",
"h",
")",
"yield",
"v"
] | Dedups an iterable in arbitrary order.
Uses memory proportional to the number of unique items in ``it``. | [
"Dedups",
"an",
"iterable",
"in",
"arbitrary",
"order",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L292-L303 |
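A quick sanity check for `nub`: the function is self-contained, so the sketch below runs as-is. One caveat worth knowing: it deduplicates on `hash(v)` rather than on equality, so distinct values with equal hashes (Python guarantees `hash(1) == hash(1.0) == hash(True)`) collapse into one.

```python
def nub(it):
    '''Dedups an iterable in arbitrary order (copied from the row above).'''
    seen = set()
    for v in it:
        h = hash(v)
        if h in seen:
            continue
        seen.add(h)
        yield v

print(list(nub([3, 1, 2, 1, 3])))  # [3, 1, 2] -- first occurrences survive
print(list(nub([1, 1.0, True])))   # [1] -- equal hashes count as duplicates
```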
248,701 | dossier/dossier.web | dossier/web/label_folders.py | Folders.folders | def folders(self, ann_id=None):
'''Yields an unordered generator for all available folders.
By default (with ``ann_id=None``), folders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only folders owned by that user.
:param str ann_id: Username
:rtype: generator of folder_id
'''
ann_id = self._annotator(ann_id)
if len(self.prefix) > 0:
prefix = '|'.join([urllib.quote(self.prefix, safe='~'),
'topic', ann_id, ''])
else:
prefix = '|'.join(['topic', ann_id, ''])
logger.info('Scanning for folders with prefix %r', prefix)
return imap(lambda id: self.unwrap_folder_content_id(id)['folder_id'],
self.store.scan_prefix_ids(prefix)) | python | def folders(self, ann_id=None):
'''Yields an unordered generator for all available folders.
By default (with ``ann_id=None``), folders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only folders owned by that user.
:param str ann_id: Username
:rtype: generator of folder_id
'''
ann_id = self._annotator(ann_id)
if len(self.prefix) > 0:
prefix = '|'.join([urllib.quote(self.prefix, safe='~'),
'topic', ann_id, ''])
else:
prefix = '|'.join(['topic', ann_id, ''])
logger.info('Scanning for folders with prefix %r', prefix)
return imap(lambda id: self.unwrap_folder_content_id(id)['folder_id'],
self.store.scan_prefix_ids(prefix)) | [
"def",
"folders",
"(",
"self",
",",
"ann_id",
"=",
"None",
")",
":",
"ann_id",
"=",
"self",
".",
"_annotator",
"(",
"ann_id",
")",
"if",
"len",
"(",
"self",
".",
"prefix",
")",
">",
"0",
":",
"prefix",
"=",
"'|'",
".",
"join",
"(",
"[",
"urllib",
".",
"quote",
"(",
"self",
".",
"prefix",
",",
"safe",
"=",
"'~'",
")",
",",
"'topic'",
",",
"ann_id",
",",
"''",
"]",
")",
"else",
":",
"prefix",
"=",
"'|'",
".",
"join",
"(",
"[",
"'topic'",
",",
"ann_id",
",",
"''",
"]",
")",
"logger",
".",
"info",
"(",
"'Scanning for folders with prefix %r'",
",",
"prefix",
")",
"return",
"imap",
"(",
"lambda",
"id",
":",
"self",
".",
"unwrap_folder_content_id",
"(",
"id",
")",
"[",
"'folder_id'",
"]",
",",
"self",
".",
"store",
".",
"scan_prefix_ids",
"(",
"prefix",
")",
")"
] | Yields an unordered generator for all available folders.
By default (with ``ann_id=None``), folders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only folders owned by that user.
:param str ann_id: Username
:rtype: generator of folder_id | [
"Yields",
"an",
"unordered",
"generator",
"for",
"all",
"available",
"folders",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L92-L110 |
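A minimal sketch of calling `Folders.folders`. The constructor wiring is an assumption (this excerpt shows only the methods, not how a `Folders` instance is built), but the key layout is visible above: folder content ids are pipe-joined as `[<quoted prefix>|]topic|<annotator_id>|...`.

```python
# Hypothetical setup: the Folders constructor and its store/label_store
# dependencies are assumptions, not shown in this excerpt.
from dossier.web.label_folders import Folders

folders = Folders(store, label_store)
for folder_id in folders.folders(ann_id='alice'):
    print(folder_id)
```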
248,702 | dossier/dossier.web | dossier/web/label_folders.py | Folders.subfolders | def subfolders(self, folder_id, ann_id=None):
'''Yields an unordered generator of subfolders in a folder.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str ann_id: Username
:rtype: generator of subfolder_id
'''
self.assert_valid_folder_id(folder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
all_labels = self.label_store.directly_connected(folder_cid)
return nub(la.subtopic_for(folder_cid) for la in all_labels) | python | def subfolders(self, folder_id, ann_id=None):
'''Yields an unordered generator of subfolders in a folder.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str ann_id: Username
:rtype: generator of subfolder_id
'''
self.assert_valid_folder_id(folder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
all_labels = self.label_store.directly_connected(folder_cid)
return nub(la.subtopic_for(folder_cid) for la in all_labels) | [
"def",
"subfolders",
"(",
"self",
",",
"folder_id",
",",
"ann_id",
"=",
"None",
")",
":",
"self",
".",
"assert_valid_folder_id",
"(",
"folder_id",
")",
"ann_id",
"=",
"self",
".",
"_annotator",
"(",
"ann_id",
")",
"folder_cid",
"=",
"self",
".",
"wrap_folder_content_id",
"(",
"ann_id",
",",
"folder_id",
")",
"if",
"self",
".",
"store",
".",
"get",
"(",
"folder_cid",
")",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"folder_id",
")",
"all_labels",
"=",
"self",
".",
"label_store",
".",
"directly_connected",
"(",
"folder_cid",
")",
"return",
"nub",
"(",
"la",
".",
"subtopic_for",
"(",
"folder_cid",
")",
"for",
"la",
"in",
"all_labels",
")"
] | Yields an unordered generator of subfolders in a folder.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str ann_id: Username
:rtype: generator of subfolder_id | [
"Yields",
"an",
"unodered",
"generator",
"of",
"subfolders",
"in",
"a",
"folder",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L112-L129 |
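Continuing the sketch above: `subfolders` raises `KeyError` when the wrapped folder content id is missing from the store, so a caller that isn't sure the folder exists typically guards the call:

```python
try:
    for subfolder_id in folders.subfolders('reports', ann_id='alice'):
        print(subfolder_id)
except KeyError:
    print('no such folder: reports')
```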
248,703 | dossier/dossier.web | dossier/web/label_folders.py | Folders.parent_subfolders | def parent_subfolders(self, ident, ann_id=None):
'''An unordered generator of parent subfolders for ``ident``.
``ident`` can either be a ``content_id`` or a tuple of
``(content_id, subtopic_id)``.
Parent subfolders are limited to the annotator id given.
:param ident: identifier
:type ident: ``str`` or ``(str, str)``
:param str ann_id: Username
:rtype: generator of ``(folder_id, subfolder_id)``
'''
ann_id = self._annotator(ann_id)
cid, _ = normalize_ident(ident)
for lab in self.label_store.directly_connected(ident):
folder_cid = lab.other(cid)
subfolder_sid = lab.subtopic_for(folder_cid)
if not folder_cid.startswith('topic|'):
continue
folder = self.unwrap_folder_content_id(folder_cid)
subfolder = self.unwrap_subfolder_subtopic_id(subfolder_sid)
if folder['annotator_id'] != ann_id:
continue
yield (folder['folder_id'], subfolder) | python | def parent_subfolders(self, ident, ann_id=None):
'''An unordered generator of parent subfolders for ``ident``.
``ident`` can either be a ``content_id`` or a tuple of
``(content_id, subtopic_id)``.
Parent subfolders are limited to the annotator id given.
:param ident: identifier
:type ident: ``str`` or ``(str, str)``
:param str ann_id: Username
:rtype: generator of ``(folder_id, subfolder_id)``
'''
ann_id = self._annotator(ann_id)
cid, _ = normalize_ident(ident)
for lab in self.label_store.directly_connected(ident):
folder_cid = lab.other(cid)
subfolder_sid = lab.subtopic_for(folder_cid)
if not folder_cid.startswith('topic|'):
continue
folder = self.unwrap_folder_content_id(folder_cid)
subfolder = self.unwrap_subfolder_subtopic_id(subfolder_sid)
if folder['annotator_id'] != ann_id:
continue
yield (folder['folder_id'], subfolder) | [
"def",
"parent_subfolders",
"(",
"self",
",",
"ident",
",",
"ann_id",
"=",
"None",
")",
":",
"ann_id",
"=",
"self",
".",
"_annotator",
"(",
"ann_id",
")",
"cid",
",",
"_",
"=",
"normalize_ident",
"(",
"ident",
")",
"for",
"lab",
"in",
"self",
".",
"label_store",
".",
"directly_connected",
"(",
"ident",
")",
":",
"folder_cid",
"=",
"lab",
".",
"other",
"(",
"cid",
")",
"subfolder_sid",
"=",
"lab",
".",
"subtopic_for",
"(",
"folder_cid",
")",
"if",
"not",
"folder_cid",
".",
"startswith",
"(",
"'topic|'",
")",
":",
"continue",
"folder",
"=",
"self",
".",
"unwrap_folder_content_id",
"(",
"folder_cid",
")",
"subfolder",
"=",
"self",
".",
"unwrap_subfolder_subtopic_id",
"(",
"subfolder_sid",
")",
"if",
"folder",
"[",
"'annotator_id'",
"]",
"!=",
"ann_id",
":",
"continue",
"yield",
"(",
"folder",
"[",
"'folder_id'",
"]",
",",
"subfolder",
")"
] | An unordered generator of parent subfolders for ``ident``.
``ident`` can either be a ``content_id`` or a tuple of
``(content_id, subtopic_id)``.
Parent subfolders are limited to the annotator id given.
:param ident: identifier
:type ident: ``str`` or ``(str, str)``
:param str ann_id: Username
:rtype: generator of ``(folder_id, subfolder_id)`` | [
"An",
"unordered",
"generator",
"of",
"parent",
"subfolders",
"for",
"ident",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L131-L155 |
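`ident` may be either a bare content id or a `(content_id, subtopic_id)` pair (both forms pass through `normalize_ident`), and each yielded value is a `(folder_id, subfolder_id)` pair. A hedged sketch, with made-up ids, against the same hypothetical instance:

```python
for folder_id, subfolder_id in folders.parent_subfolders('doc-123'):
    print(folder_id, subfolder_id)

for folder_id, subfolder_id in folders.parent_subfolders(
        ('doc-123', 'para-7'), ann_id='alice'):
    print(folder_id, subfolder_id)
```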
248,704 | dossier/dossier.web | dossier/web/label_folders.py | Folders.items | def items(self, folder_id, subfolder_id, ann_id=None):
'''Yields an unordered generator of items in a subfolder.
The generator yields items, which are represented by a tuple
of ``content_id`` and ``subtopic_id``. The format of these
identifiers is unspecified.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of ``(content_id, subtopic_id)``
'''
self.assert_valid_folder_id(folder_id)
self.assert_valid_folder_id(subfolder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
ident = (folder_cid, subfolder_sid)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
for lab in self.label_store.directly_connected(ident):
cid = lab.other(folder_cid)
subid = lab.subtopic_for(cid)
yield (cid, subid) | python | def items(self, folder_id, subfolder_id, ann_id=None):
'''Yields an unordered generator of items in a subfolder.
The generator yields items, which are represented by a tuple
of ``content_id`` and ``subtopic_id``. The format of these
identifiers is unspecified.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of ``(content_id, subtopic_id)``
'''
self.assert_valid_folder_id(folder_id)
self.assert_valid_folder_id(subfolder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
ident = (folder_cid, subfolder_sid)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
for lab in self.label_store.directly_connected(ident):
cid = lab.other(folder_cid)
subid = lab.subtopic_for(cid)
yield (cid, subid) | [
"def",
"items",
"(",
"self",
",",
"folder_id",
",",
"subfolder_id",
",",
"ann_id",
"=",
"None",
")",
":",
"self",
".",
"assert_valid_folder_id",
"(",
"folder_id",
")",
"self",
".",
"assert_valid_folder_id",
"(",
"subfolder_id",
")",
"ann_id",
"=",
"self",
".",
"_annotator",
"(",
"ann_id",
")",
"folder_cid",
"=",
"self",
".",
"wrap_folder_content_id",
"(",
"ann_id",
",",
"folder_id",
")",
"subfolder_sid",
"=",
"self",
".",
"wrap_subfolder_subtopic_id",
"(",
"subfolder_id",
")",
"ident",
"=",
"(",
"folder_cid",
",",
"subfolder_sid",
")",
"if",
"self",
".",
"store",
".",
"get",
"(",
"folder_cid",
")",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"folder_id",
")",
"for",
"lab",
"in",
"self",
".",
"label_store",
".",
"directly_connected",
"(",
"ident",
")",
":",
"cid",
"=",
"lab",
".",
"other",
"(",
"folder_cid",
")",
"subid",
"=",
"lab",
".",
"subtopic_for",
"(",
"cid",
")",
"yield",
"(",
"cid",
",",
"subid",
")"
] | Yields an unordered generator of items in a subfolder.
The generator yields items, which are represented by a tuple
of ``content_id`` and ``subtopic_id``. The format of these
identifiers is unspecified.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: generator of ``(content_id, subtopic_id)`` | [
"Yields",
"an",
"unodered",
"generator",
"of",
"items",
"in",
"a",
"subfolder",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L157-L185 |
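Items come back as unordered `(content_id, subtopic_id)` pairs whose formats are opaque to this layer; continuing the same hypothetical setup:

```python
for content_id, subtopic_id in folders.items('reports', 'q3', ann_id='alice'):
    print(content_id, subtopic_id)
```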
248,705 | dossier/dossier.web | dossier/web/label_folders.py | Folders.grouped_items | def grouped_items(self, folder_id, subfolder_id, ann_id=None):
'''Returns a dictionary from content ids to subtopic ids.
Namely, the mapping is ``content_id |--> list of subtopic id``.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: ``dict`` of ``content_id |--> [subtopic_id]``
'''
d = defaultdict(list)
for cid, subid in self.items(folder_id, subfolder_id, ann_id=ann_id):
d[cid].append(subid)
return d | python | def grouped_items(self, folder_id, subfolder_id, ann_id=None):
'''Returns a dictionary from content ids to subtopic ids.
Namely, the mapping is ``content_id |--> list of subtopic id``.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: ``dict`` of ``content_id |--> [subtopic_id]``
'''
d = defaultdict(list)
for cid, subid in self.items(folder_id, subfolder_id, ann_id=ann_id):
d[cid].append(subid)
return d | [
"def",
"grouped_items",
"(",
"self",
",",
"folder_id",
",",
"subfolder_id",
",",
"ann_id",
"=",
"None",
")",
":",
"d",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"cid",
",",
"subid",
"in",
"self",
".",
"items",
"(",
"folder_id",
",",
"subfolder_id",
",",
"ann_id",
"=",
"ann_id",
")",
":",
"d",
"[",
"cid",
"]",
".",
"append",
"(",
"subid",
")",
"return",
"d"
] | Returns a dictionary from content ids to subtopic ids.
Namely, the mapping is ``content_id |--> list of subtopic id``.
By default (with ``ann_id=None``), subfolders are shown for all
anonymous users. Optionally, ``ann_id`` can be set to a username,
which restricts the list to only subfolders owned by that user.
:param str folder_id: Folder id
:param str subfolder_id: Subfolder id
:param str ann_id: Username
:rtype: ``dict`` of ``content_id |--> [subtopic_id]`` | [
"Returns",
"a",
"dictionary",
"from",
"content",
"ids",
"to",
"subtopic",
"ids",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L187-L204 |
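The grouping step is the standard `defaultdict(list)` idiom, easy to verify in isolation:

```python
from collections import defaultdict

pairs = [('doc-1', 'p1'), ('doc-2', 'p1'), ('doc-1', 'p2')]
d = defaultdict(list)
for cid, subid in pairs:
    d[cid].append(subid)
print(dict(d))  # {'doc-1': ['p1', 'p2'], 'doc-2': ['p1']}
```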
248,706 | dossier/dossier.web | dossier/web/label_folders.py | Folders.add_folder | def add_folder(self, folder_id, ann_id=None):
'''Add a folder.
If ``ann_id`` is set, then the folder is owned by the given user.
Otherwise, the folder is owned and viewable by all anonymous
users.
:param str folder_id: Folder id
:param str ann_id: Username
'''
self.assert_valid_folder_id(folder_id)
ann_id = self._annotator(ann_id)
cid = self.wrap_folder_content_id(ann_id, folder_id)
self.store.put([(cid, FeatureCollection())])
logger.info('Added folder %r with content id %r', folder_id, cid) | python | def add_folder(self, folder_id, ann_id=None):
'''Add a folder.
If ``ann_id`` is set, then the folder is owned by the given user.
Otherwise, the folder is owned and viewable by all anonymous
users.
:param str folder_id: Folder id
:param str ann_id: Username
'''
self.assert_valid_folder_id(folder_id)
ann_id = self._annotator(ann_id)
cid = self.wrap_folder_content_id(ann_id, folder_id)
self.store.put([(cid, FeatureCollection())])
logger.info('Added folder %r with content id %r', folder_id, cid) | [
"def",
"add_folder",
"(",
"self",
",",
"folder_id",
",",
"ann_id",
"=",
"None",
")",
":",
"self",
".",
"assert_valid_folder_id",
"(",
"folder_id",
")",
"ann_id",
"=",
"self",
".",
"_annotator",
"(",
"ann_id",
")",
"cid",
"=",
"self",
".",
"wrap_folder_content_id",
"(",
"ann_id",
",",
"folder_id",
")",
"self",
".",
"store",
".",
"put",
"(",
"[",
"(",
"cid",
",",
"FeatureCollection",
"(",
")",
")",
"]",
")",
"logger",
".",
"info",
"(",
"'Added folder %r with content id %r'",
",",
"folder_id",
",",
"cid",
")"
] | Add a folder.
If ``ann_id`` is set, then the folder is owned by the given user.
Otherwise, the folder is owned and viewable by all anonymous
users.
:param str folder_id: Folder id
:param str ann_id: Username | [
"Add",
"a",
"folder",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L206-L220 |
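Creating a folder just writes an empty `FeatureCollection` under the wrapped content id; with the hypothetical instance from earlier:

```python
folders.add_folder('reports')                  # owned by the anonymous user
folders.add_folder('reports', ann_id='alice')  # owned by 'alice'
```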
248,707 | dossier/dossier.web | dossier/web/label_folders.py | Folders.add_item | def add_item(self, folder_id, subfolder_id, content_id, subtopic_id=None,
ann_id=None):
'''Add an item to a subfolder.
The format of ``content_id`` and ``subtopic_id`` is
unspecified. It is application specific.
If ``ann_id`` is set, then the item is owned by the given user.
Otherwise, the item is owned and viewable by all anonymous
users.
:param str folder_id: Folder id
:param str subfolder_id: Folder id
:param str content_id: content identifier
:param str subtopic_id: subtopic identifier
:param str ann_id: Username
'''
self.assert_valid_folder_id(folder_id)
self.assert_valid_folder_id(subfolder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
lab = Label(folder_cid, content_id,
ann_id, CorefValue.Positive,
subtopic_id1=subfolder_sid,
subtopic_id2=subtopic_id)
self.label_store.put(lab)
logger.info('Added subfolder item: %r', lab) | python | def add_item(self, folder_id, subfolder_id, content_id, subtopic_id=None,
ann_id=None):
'''Add an item to a subfolder.
The format of ``content_id`` and ``subtopic_id`` is
unspecified. It is application specific.
If ``ann_id`` is set, then the item is owned by the given user.
Otherwise, the item is owned and viewable by all anonymous
users.
:param str folder_id: Folder id
:param str subfolder_id: Folder id
:param str content_id: content identifier
:param str subtopic_id: subtopic identifier
:param str ann_id: Username
'''
self.assert_valid_folder_id(folder_id)
self.assert_valid_folder_id(subfolder_id)
ann_id = self._annotator(ann_id)
folder_cid = self.wrap_folder_content_id(ann_id, folder_id)
subfolder_sid = self.wrap_subfolder_subtopic_id(subfolder_id)
if self.store.get(folder_cid) is None:
raise KeyError(folder_id)
lab = Label(folder_cid, content_id,
ann_id, CorefValue.Positive,
subtopic_id1=subfolder_sid,
subtopic_id2=subtopic_id)
self.label_store.put(lab)
logger.info('Added subfolder item: %r', lab) | [
"def",
"add_item",
"(",
"self",
",",
"folder_id",
",",
"subfolder_id",
",",
"content_id",
",",
"subtopic_id",
"=",
"None",
",",
"ann_id",
"=",
"None",
")",
":",
"self",
".",
"assert_valid_folder_id",
"(",
"folder_id",
")",
"self",
".",
"assert_valid_folder_id",
"(",
"subfolder_id",
")",
"ann_id",
"=",
"self",
".",
"_annotator",
"(",
"ann_id",
")",
"folder_cid",
"=",
"self",
".",
"wrap_folder_content_id",
"(",
"ann_id",
",",
"folder_id",
")",
"subfolder_sid",
"=",
"self",
".",
"wrap_subfolder_subtopic_id",
"(",
"subfolder_id",
")",
"if",
"self",
".",
"store",
".",
"get",
"(",
"folder_cid",
")",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"folder_id",
")",
"lab",
"=",
"Label",
"(",
"folder_cid",
",",
"content_id",
",",
"ann_id",
",",
"CorefValue",
".",
"Positive",
",",
"subtopic_id1",
"=",
"subfolder_sid",
",",
"subtopic_id2",
"=",
"subtopic_id",
")",
"self",
".",
"label_store",
".",
"put",
"(",
"lab",
")",
"logger",
".",
"info",
"(",
"'Added subfolder item: %r'",
",",
"lab",
")"
] | Add an item to a subfolder.
The format of ``content_id`` and ``subtopic_id`` is
unspecified. It is application specific.
If ``ann_id`` is set, then the item is owned by the given user.
Otherwise, the item is owned and viewable by all anonymous
users.
:param str folder_id: Folder id
:param str subfolder_id: Folder id
:param str content_id: content identifier
:param str subtopic_id: subtopic identifier
:param str ann_id: Username | [
"Add",
"an",
"item",
"to",
"a",
"subfolder",
"."
] | 1cad1cce3c37d3a4e956abc710a2bc1afe16a092 | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/label_folders.py#L222-L253 |
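An item is recorded as a positive-coreference `Label` between the folder's content id and the item's content id. Note the asymmetry visible in the code: the parent folder must already exist (otherwise `KeyError`), while the subfolder is created implicitly by the label itself. A sketch, again with made-up ids:

```python
folders.add_folder('reports', ann_id='alice')
folders.add_item('reports', 'q3', 'doc-123',
                 subtopic_id='para-7', ann_id='alice')
```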
248,708 | treycucco/bidon | lib/generate_models.py | generate_models | def generate_models(args):
"""Generates models from the script input."""
data_table = get_data_table(args.filename)
tables = to_tables(data_table.rows_to_dicts())
attr_indent = "\n" + args.indent * 2
attr_sep = "," + attr_indent
for tname, cols in tables.items():
model_name = table_to_model_name(tname, list(cols.values())[0]["table_schema"])
pk_cols, oth_cols = split_pks(cols)
timestamps = get_timestamps(cols, args.created_at_col_name, args.updated_at_col_name)
is_auto = len(pk_cols) == 1 and cols[pk_cols[0]]["is_auto"] == "t"
attrs = OrderedDict()
for cname in oth_cols:
if cname not in timestamps:
attrs[cname] = None
print(_MODEL_SOURCE.format(
class_name=model_name,
base_class_name="ModelBase",
indent=args.indent,
table_name=repr(tname),
pk_name=repr(pk_cols[0] if len(pk_cols) == 1 else pk_cols),
pk_is_auto=is_auto,
timestamps=timestamps,
attrs="dict(" + attr_indent + attr_sep.join("{0}={1}".format(k, v) for k, v in attrs.items()) + ")"))
print() | python | def generate_models(args):
"""Generates models from the script input."""
data_table = get_data_table(args.filename)
tables = to_tables(data_table.rows_to_dicts())
attr_indent = "\n" + args.indent * 2
attr_sep = "," + attr_indent
for tname, cols in tables.items():
model_name = table_to_model_name(tname, list(cols.values())[0]["table_schema"])
pk_cols, oth_cols = split_pks(cols)
timestamps = get_timestamps(cols, args.created_at_col_name, args.updated_at_col_name)
is_auto = len(pk_cols) == 1 and cols[pk_cols[0]]["is_auto"] == "t"
attrs = OrderedDict()
for cname in oth_cols:
if cname not in timestamps:
attrs[cname] = None
print(_MODEL_SOURCE.format(
class_name=model_name,
base_class_name="ModelBase",
indent=args.indent,
table_name=repr(tname),
pk_name=repr(pk_cols[0] if len(pk_cols) == 1 else pk_cols),
pk_is_auto=is_auto,
timestamps=timestamps,
attrs="dict(" + attr_indent + attr_sep.join("{0}={1}".format(k, v) for k, v in attrs.items()) + ")"))
print() | [
"def",
"generate_models",
"(",
"args",
")",
":",
"data_table",
"=",
"get_data_table",
"(",
"args",
".",
"filename",
")",
"tables",
"=",
"to_tables",
"(",
"data_table",
".",
"rows_to_dicts",
"(",
")",
")",
"attr_indent",
"=",
"\"\\n\"",
"+",
"args",
".",
"indent",
"*",
"2",
"attr_sep",
"=",
"\",\"",
"+",
"attr_indent",
"for",
"tname",
",",
"cols",
"in",
"tables",
".",
"items",
"(",
")",
":",
"model_name",
"=",
"table_to_model_name",
"(",
"tname",
",",
"list",
"(",
"cols",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"[",
"\"table_schema\"",
"]",
")",
"pk_cols",
",",
"oth_cols",
"=",
"split_pks",
"(",
"cols",
")",
"timestamps",
"=",
"get_timestamps",
"(",
"cols",
",",
"args",
".",
"created_at_col_name",
",",
"args",
".",
"updated_at_col_name",
")",
"is_auto",
"=",
"len",
"(",
"pk_cols",
")",
"==",
"1",
"and",
"cols",
"[",
"pk_cols",
"[",
"0",
"]",
"]",
"[",
"\"is_auto\"",
"]",
"==",
"\"t\"",
"attrs",
"=",
"OrderedDict",
"(",
")",
"for",
"cname",
"in",
"oth_cols",
":",
"if",
"cname",
"not",
"in",
"timestamps",
":",
"attrs",
"[",
"cname",
"]",
"=",
"None",
"print",
"(",
"_MODEL_SOURCE",
".",
"format",
"(",
"class_name",
"=",
"model_name",
",",
"base_class_name",
"=",
"\"ModelBase\"",
",",
"indent",
"=",
"args",
".",
"indent",
",",
"table_name",
"=",
"repr",
"(",
"tname",
")",
",",
"pk_name",
"=",
"repr",
"(",
"pk_cols",
"[",
"0",
"]",
"if",
"len",
"(",
"pk_cols",
")",
"==",
"1",
"else",
"pk_cols",
")",
",",
"pk_is_auto",
"=",
"is_auto",
",",
"timestamps",
"=",
"timestamps",
",",
"attrs",
"=",
"\"dict(\"",
"+",
"attr_indent",
"+",
"attr_sep",
".",
"join",
"(",
"\"{0}={1}\"",
".",
"format",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"attrs",
".",
"items",
"(",
")",
")",
"+",
"\")\"",
")",
")",
"print",
"(",
")"
] | Generates models from the script input. | [
"Generates",
"models",
"from",
"the",
"script",
"input",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L55-L82 |
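`generate_models` expects an argparse-style namespace; the attribute names below are taken from the accesses in the function body, while the CSV schema (columns such as `table_name`, `column_name`, `is_auto`) is implied by the helpers rather than documented in this excerpt:

```python
import argparse

args = argparse.Namespace(
    filename='columns.csv',            # column-metadata CSV; None reads STDIN
    indent='    ',                     # indentation unit; doubled for attrs
    created_at_col_name='created_at',
    updated_at_col_name='updated_at',
)
generate_models(args)
```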
248,709 | treycucco/bidon | lib/generate_models.py | get_data_table | def get_data_table(filename):
"""Returns a DataTable instance built from either the filename, or STDIN if filename is None."""
with get_file_object(filename, "r") as rf:
return DataTable(list(csv.reader(rf))) | python | def get_data_table(filename):
"""Returns a DataTable instance built from either the filename, or STDIN if filename is None."""
with get_file_object(filename, "r") as rf:
return DataTable(list(csv.reader(rf))) | [
"def",
"get_data_table",
"(",
"filename",
")",
":",
"with",
"get_file_object",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"rf",
":",
"return",
"DataTable",
"(",
"list",
"(",
"csv",
".",
"reader",
"(",
"rf",
")",
")",
")"
] | Returns a DataTable instance built from either the filename, or STDIN if filename is None. | [
"Returns",
"a",
"DataTable",
"instance",
"built",
"from",
"either",
"the",
"filename",
"or",
"STDIN",
"if",
"filename",
"is",
"None",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L85-L88 |
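`get_data_table` reads from a named file or, when `filename` is `None`, from STDIN via the module's `get_file_object` helper, so the script works equally well in a pipeline (`psql ... | python generate_models.py`) or on a saved CSV.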
248,710 | treycucco/bidon | lib/generate_models.py | to_tables | def to_tables(cols):
"""Builds and returns a Dictionary whose keys are table names and values are OrderedDicts whose
keys are column names and values are the col objects from which the definition is derived.
"""
tables = OrderedDict()
for col in cols:
tname = col["table_name"]
if tname not in tables:
tables[tname] = OrderedDict()
tables[tname][col["column_name"]] = col
return tables | python | def to_tables(cols):
"""Builds and returns a Dictionary whose keys are table names and values are OrderedDicts whose
keys are column names and values are the col objects from which the definition is derived.
"""
tables = OrderedDict()
for col in cols:
tname = col["table_name"]
if tname not in tables:
tables[tname] = OrderedDict()
tables[tname][col["column_name"]] = col
return tables | [
"def",
"to_tables",
"(",
"cols",
")",
":",
"tables",
"=",
"OrderedDict",
"(",
")",
"for",
"col",
"in",
"cols",
":",
"tname",
"=",
"col",
"[",
"\"table_name\"",
"]",
"if",
"tname",
"not",
"in",
"tables",
":",
"tables",
"[",
"tname",
"]",
"=",
"OrderedDict",
"(",
")",
"tables",
"[",
"tname",
"]",
"[",
"col",
"[",
"\"column_name\"",
"]",
"]",
"=",
"col",
"return",
"tables"
] | Builds and returns a Dictionary whose keys are table names and values are OrderedDicts whose
keys are column names and values are the col objects from which the definition is derived. | [
"Builds",
"and",
"returns",
"a",
"Dictionary",
"whose",
"keys",
"are",
"table",
"names",
"and",
"values",
"are",
"OrderedDicts",
"whose",
"keys",
"are",
"column",
"names",
"and",
"values",
"are",
"the",
"col",
"objects",
"from",
"which",
"the",
"definition",
"is",
"derived",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L91-L101 |
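Given the module's `OrderedDict` import, the grouping is easy to exercise with minimal column dicts; only `table_name` and `column_name` are touched here:

```python
cols = [
    {'table_name': 'users', 'column_name': 'id'},
    {'table_name': 'users', 'column_name': 'email'},
    {'table_name': 'posts', 'column_name': 'id'},
]
tables = to_tables(cols)
print(list(tables))           # ['users', 'posts']
print(list(tables['users']))  # ['id', 'email']
```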
248,711 | treycucco/bidon | lib/generate_models.py | snake_to_pascal | def snake_to_pascal(name, singularize=False):
"""Converts snake_case to PascalCase. If singularize is True, an attempt is made at singularizing
each part of the resulting name.
"""
parts = name.split("_")
if singularize:
return "".join(p.upper() if p in _ALL_CAPS else to_singular(p.title()) for p in parts)
else:
return "".join(p.upper() if p in _ALL_CAPS else p.title() for p in parts) | python | def snake_to_pascal(name, singularize=False):
"""Converts snake_case to PascalCase. If singularize is True, an attempt is made at singularizing
each part of the resulting name.
"""
parts = name.split("_")
if singularize:
return "".join(p.upper() if p in _ALL_CAPS else to_singular(p.title()) for p in parts)
else:
return "".join(p.upper() if p in _ALL_CAPS else p.title() for p in parts) | [
"def",
"snake_to_pascal",
"(",
"name",
",",
"singularize",
"=",
"False",
")",
":",
"parts",
"=",
"name",
".",
"split",
"(",
"\"_\"",
")",
"if",
"singularize",
":",
"return",
"\"\"",
".",
"join",
"(",
"p",
".",
"upper",
"(",
")",
"if",
"p",
"in",
"_ALL_CAPS",
"else",
"to_singular",
"(",
"p",
".",
"title",
"(",
")",
")",
"for",
"p",
"in",
"parts",
")",
"else",
":",
"return",
"\"\"",
".",
"join",
"(",
"p",
".",
"upper",
"(",
")",
"if",
"p",
"in",
"_ALL_CAPS",
"else",
"p",
".",
"title",
"(",
")",
"for",
"p",
"in",
"parts",
")"
] | Converts snake_case to PascalCase. If singularize is True, an attempt is made at singularizing
each part of the resulting name. | [
"Converts",
"snake_case",
"to",
"PascalCase",
".",
"If",
"singularize",
"is",
"True",
"an",
"attempt",
"is",
"made",
"at",
"singularizing",
"each",
"part",
"of",
"the",
"resulting",
"name",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L109-L117 |
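Expected conversions, assuming `_ALL_CAPS` holds tokens like `'id'` (the set itself isn't shown in this excerpt):

```python
print(snake_to_pascal('user_accounts'))                    # 'UserAccounts'
print(snake_to_pascal('user_accounts', singularize=True))  # 'UserAccount'
print(snake_to_pascal('user_id'))  # 'UserID' if 'id' is in _ALL_CAPS
```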
248,712 | treycucco/bidon | lib/generate_models.py | to_singular | def to_singular(word):
"""Attempts to singularize a word."""
if word[-1] != "s":
return word
elif word.endswith("ies"):
return word[:-3] + "y"
elif word.endswith("ses"):
return word[:-2]
else:
return word[:-1] | python | def to_singular(word):
"""Attempts to singularize a word."""
if word[-1] != "s":
return word
elif word.endswith("ies"):
return word[:-3] + "y"
elif word.endswith("ses"):
return word[:-2]
else:
return word[:-1] | [
"def",
"to_singular",
"(",
"word",
")",
":",
"if",
"word",
"[",
"-",
"1",
"]",
"!=",
"\"s\"",
":",
"return",
"word",
"elif",
"word",
".",
"endswith",
"(",
"\"ies\"",
")",
":",
"return",
"word",
"[",
":",
"-",
"3",
"]",
"+",
"\"y\"",
"elif",
"word",
".",
"endswith",
"(",
"\"ses\"",
")",
":",
"return",
"word",
"[",
":",
"-",
"2",
"]",
"else",
":",
"return",
"word",
"[",
":",
"-",
"1",
"]"
] | Attempts to singularize a word. | [
"Attempts",
"to",
"singularize",
"a",
"word",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L120-L129 |
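The heuristic covers the three common suffixes and leaves irregular plurals untouched:

```python
print(to_singular('folders'))   # 'folder'
print(to_singular('entries'))   # 'entry'   ('ies' -> 'y')
print(to_singular('statuses'))  # 'status'  ('ses' -> drop the 'es')
print(to_singular('people'))    # 'people'  (irregulars pass through)
```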
248,713 | treycucco/bidon | lib/generate_models.py | get_timestamps | def get_timestamps(cols, created_name, updated_name):
"""Returns a 2-tuple of the timestamp columns that were found on the table definition."""
has_created = created_name in cols
has_updated = updated_name in cols
return (created_name if has_created else None, updated_name if has_updated else None) | python | def get_timestamps(cols, created_name, updated_name):
"""Returns a 2-tuple of the timestamp columns that were found on the table definition."""
has_created = created_name in cols
has_updated = updated_name in cols
return (created_name if has_created else None, updated_name if has_updated else None) | [
"def",
"get_timestamps",
"(",
"cols",
",",
"created_name",
",",
"updated_name",
")",
":",
"has_created",
"=",
"created_name",
"in",
"cols",
"has_updated",
"=",
"updated_name",
"in",
"cols",
"return",
"(",
"created_name",
"if",
"has_created",
"else",
"None",
",",
"updated_name",
"if",
"has_updated",
"else",
"None",
")"
] | Returns a 2-tuple of the timestamp columns that were found on the table definition. | [
"Returns",
"a",
"2",
"-",
"tuple",
"of",
"the",
"timestamp",
"columns",
"that",
"were",
"found",
"on",
"the",
"table",
"definition",
"."
] | d9f24596841d0e69e8ac70a1d1a1deecea95e340 | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/lib/generate_models.py#L145-L149 |
248,714 | Tinche/django-bower-cache | registry/gitwrapper.py | pull_from_origin | def pull_from_origin(repo_path):
"""Execute 'git pull' at the provided repo_path."""
LOG.info("Pulling from origin at %s." % repo_path)
command = GIT_PULL_CMD.format(repo_path)
resp = envoy.run(command)
if resp.status_code != 0:
LOG.exception("Pull failed.")
raise GitException(resp.std_err)
else:
LOG.info("Pull successful.") | python | def pull_from_origin(repo_path):
"""Execute 'git pull' at the provided repo_path."""
LOG.info("Pulling from origin at %s." % repo_path)
command = GIT_PULL_CMD.format(repo_path)
resp = envoy.run(command)
if resp.status_code != 0:
LOG.exception("Pull failed.")
raise GitException(resp.std_err)
else:
LOG.info("Pull successful.") | [
"def",
"pull_from_origin",
"(",
"repo_path",
")",
":",
"LOG",
".",
"info",
"(",
"\"Pulling from origin at %s.\"",
"%",
"repo_path",
")",
"command",
"=",
"GIT_PULL_CMD",
".",
"format",
"(",
"repo_path",
")",
"resp",
"=",
"envoy",
".",
"run",
"(",
"command",
")",
"if",
"resp",
".",
"status_code",
"!=",
"0",
":",
"LOG",
".",
"exception",
"(",
"\"Pull failed.\"",
")",
"raise",
"GitException",
"(",
"resp",
".",
"std_err",
")",
"else",
":",
"LOG",
".",
"info",
"(",
"\"Pull successful.\"",
")"
] | Execute 'git pull' at the provided repo_path. | [
"Execute",
"git",
"pull",
"at",
"the",
"provided",
"repo_path",
"."
] | 5245b2ee80c33c09d85ce0bf8f047825d9df2118 | https://github.com/Tinche/django-bower-cache/blob/5245b2ee80c33c09d85ce0bf8f047825d9df2118/registry/gitwrapper.py#L26-L35 |
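The module shells out through `envoy`; `GIT_PULL_CMD` is a format string not shown in this excerpt. A rough standard-library equivalent, assuming the command amounts to running `git pull` inside the repo:

```python
import subprocess

def pull_from_origin_stdlib(repo_path):
    """Sketch only: same contract as pull_from_origin, via subprocess."""
    result = subprocess.run(['git', '-C', repo_path, 'pull'],
                            capture_output=True, text=True)
    if result.returncode != 0:
        raise GitException(result.stderr)  # GitException: defined in the module
```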
248,715 | Tinche/django-bower-cache | registry/gitwrapper.py | read_remote_origin | def read_remote_origin(repo_dir):
"""Read the remote origin URL from the given git repo, or None if unset."""
conf = ConfigParser()
conf.read(os.path.join(repo_dir, '.git/config'))
return conf.get('remote "origin"', 'url') | python | def read_remote_origin(repo_dir):
"""Read the remote origin URL from the given git repo, or None if unset."""
conf = ConfigParser()
conf.read(os.path.join(repo_dir, '.git/config'))
return conf.get('remote "origin"', 'url') | [
"def",
"read_remote_origin",
"(",
"repo_dir",
")",
":",
"conf",
"=",
"ConfigParser",
"(",
")",
"conf",
".",
"read",
"(",
"os",
".",
"path",
".",
"join",
"(",
"repo_dir",
",",
"'.git/config'",
")",
")",
"return",
"conf",
".",
"get",
"(",
"'remote \"origin\"'",
",",
"'url'",
")"
] | Read the remote origin URL from the given git repo, or None if unset. | [
"Read",
"the",
"remote",
"origin",
"URL",
"from",
"the",
"given",
"git",
"repo",
"or",
"None",
"if",
"unset",
"."
] | 5245b2ee80c33c09d85ce0bf8f047825d9df2118 | https://github.com/Tinche/django-bower-cache/blob/5245b2ee80c33c09d85ce0bf8f047825d9df2118/registry/gitwrapper.py#L38-L42 |
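The section name in `.git/config` literally contains the quotes, hence the lookup key `remote "origin"`. A standalone check, written against Python 3's `configparser` (the module itself uses the older `ConfigParser` import):

```python
from configparser import ConfigParser
from io import StringIO

conf = ConfigParser()
conf.read_file(StringIO('[remote "origin"]\nurl = git@example.com:a/b.git\n'))
print(conf.get('remote "origin"', 'url'))  # git@example.com:a/b.git
```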
248,716 | Tinche/django-bower-cache | registry/gitwrapper.py | clone_from | def clone_from(repo_url, repo_dir):
"""Clone a remote git repo into a local directory."""
repo_url = _fix_repo_url(repo_url)
LOG.info("Cloning %s into %s." % (repo_url, repo_dir))
cmd = GIT_CLONE_CMD.format(repo_url, repo_dir)
resp = envoy.run(cmd)
if resp.status_code != 0:
LOG.error("Cloned failed: %s" % resp.std_err)
raise GitException(resp.std_err)
LOG.info("Clone successful.") | python | def clone_from(repo_url, repo_dir):
"""Clone a remote git repo into a local directory."""
repo_url = _fix_repo_url(repo_url)
LOG.info("Cloning %s into %s." % (repo_url, repo_dir))
cmd = GIT_CLONE_CMD.format(repo_url, repo_dir)
resp = envoy.run(cmd)
if resp.status_code != 0:
LOG.error("Cloned failed: %s" % resp.std_err)
raise GitException(resp.std_err)
LOG.info("Clone successful.") | [
"def",
"clone_from",
"(",
"repo_url",
",",
"repo_dir",
")",
":",
"repo_url",
"=",
"_fix_repo_url",
"(",
"repo_url",
")",
"LOG",
".",
"info",
"(",
"\"Cloning %s into %s.\"",
"%",
"(",
"repo_url",
",",
"repo_dir",
")",
")",
"cmd",
"=",
"GIT_CLONE_CMD",
".",
"format",
"(",
"repo_url",
",",
"repo_dir",
")",
"resp",
"=",
"envoy",
".",
"run",
"(",
"cmd",
")",
"if",
"resp",
".",
"status_code",
"!=",
"0",
":",
"LOG",
".",
"error",
"(",
"\"Cloned failed: %s\"",
"%",
"resp",
".",
"std_err",
")",
"raise",
"GitException",
"(",
"resp",
".",
"std_err",
")",
"LOG",
".",
"info",
"(",
"\"Clone successful.\"",
")"
] | Clone a remote git repo into a local directory. | [
"Clone",
"a",
"remote",
"git",
"repo",
"into",
"a",
"local",
"directory",
"."
] | 5245b2ee80c33c09d85ce0bf8f047825d9df2118 | https://github.com/Tinche/django-bower-cache/blob/5245b2ee80c33c09d85ce0bf8f047825d9df2118/registry/gitwrapper.py#L45-L54 |
248,717 | zweifisch/biro | biro/__init__.py | route | def route(method, pattern, handler=None):
"""register a routing rule
Example:
route('GET', '/path/<param>', handler)
"""
if handler is None:
return partial(route, method, pattern)
return routes.append(method, pattern, handler) | python | def route(method, pattern, handler=None):
"""register a routing rule
Example:
route('GET', '/path/<param>', handler)
"""
if handler is None:
return partial(route, method, pattern)
return routes.append(method, pattern, handler) | [
"def",
"route",
"(",
"method",
",",
"pattern",
",",
"handler",
"=",
"None",
")",
":",
"if",
"handler",
"is",
"None",
":",
"return",
"partial",
"(",
"route",
",",
"method",
",",
"pattern",
")",
"return",
"routes",
".",
"append",
"(",
"method",
",",
"pattern",
",",
"handler",
")"
] | register a routing rule
Example:
route('GET', '/path/<param>', handler) | [
"register",
"a",
"routing",
"rule"
] | 0712746de65ff1e25b4f99c669eddd1fb8d1043e | https://github.com/zweifisch/biro/blob/0712746de65ff1e25b4f99c669eddd1fb8d1043e/biro/__init__.py#L14-L24 |
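Because `route` returns a `partial` when `handler` is omitted, it also works as a decorator. One caveat: in decorator form the function name is rebound to whatever `routes.append` returns, and that return value isn't shown in this excerpt.

```python
# Direct registration:
def show_user(param):
    return 'user %s' % param

route('GET', '/path/<param>', show_user)

# Decorator form -- route(...) is a partial awaiting the handler:
@route('GET', '/other/<param>')
def show_other(param):
    return 'other %s' % param
```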
248,718 | eallik/spinoff | spinoff/actor/util.py | Container.spawn | def spawn(self, owner, *args, **kwargs):
"""Spawns a new subordinate actor of `owner` and stores it in this container.
jobs = Container()
...
jobs.spawn(self, Job)
jobs.spawn(self, Job, some_param=123)
jobs = Container(Job)
...
jobs.spawn(self)
jobs.spawn(self, some_param=123)
jobs = Container(Job.using('abc', some_kwarg=321))
...
jobs.spawn(self, extra_kwarg=123)
jobs.spawn(self, some_kwarg=123, extra_kwarg=123)
jobs.spawn(self, 'xyz', some_kwarg=345, extra_kwarg=567)
"""
return (self._spawn(owner, self.factory, *args, **kwargs)
if self.factory else
self._spawn(owner, *args, **kwargs)) | python | def spawn(self, owner, *args, **kwargs):
"""Spawns a new subordinate actor of `owner` and stores it in this container.
jobs = Container()
...
jobs.spawn(self, Job)
jobs.spawn(self, Job, some_param=123)
jobs = Container(Job)
...
jobs.spawn(self)
jobs.spawn(self, some_param=123)
jobs = Container(Job.using('abc', some_kwarg=321))
...
jobs.spawn(self, extra_kwarg=123)
jobs.spawn(self, some_kwarg=123, extra_kwarg=123)
jobs.spawn(self, 'xyz', some_kwarg=345, extra_kwarg=567)
"""
return (self._spawn(owner, self.factory, *args, **kwargs)
if self.factory else
self._spawn(owner, *args, **kwargs)) | [
"def",
"spawn",
"(",
"self",
",",
"owner",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"self",
".",
"_spawn",
"(",
"owner",
",",
"self",
".",
"factory",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"factory",
"else",
"self",
".",
"_spawn",
"(",
"owner",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | Spawns a new subordinate actor of `owner` and stores it in this container.
jobs = Container()
...
jobs.spawn(self, Job)
jobs.spawn(self, Job, some_param=123)
jobs = Container(Job)
...
jobs.spawn(self)
jobs.spawn(self, some_param=123)
jobs = Container(Job.using('abc', some_kwarg=321))
...
jobs.spawn(self, extra_kwarg=123)
jobs.spawn(self, some_kwarg=123, extra_kwarg=123)
jobs.spawn(self, 'xyz', some_kwarg=345, extra_kwarg=567) | [
"Spawns",
"a",
"new",
"subordinate",
"actor",
"of",
"owner",
"and",
"stores",
"it",
"in",
"this",
"container",
"."
] | 06b00d6b86c7422c9cb8f9a4b2915906e92b7d52 | https://github.com/eallik/spinoff/blob/06b00d6b86c7422c9cb8f9a4b2915906e92b7d52/spinoff/actor/util.py#L22-L44 |
248,719 | naphatkrit/easyci | easyci/commands/watch.py | watch | def watch(ctx):
"""Watch the directory for changes. Automatically run tests.
"""
vcs = ctx.obj['vcs']
event_handler = TestsEventHandler(vcs)
observer = Observer()
observer.schedule(event_handler, vcs.path, recursive=True)
observer.start()
click.echo('Watching directory `{path}`. Use ctrl-c to stop.'.format(path=vcs.path))
while observer.isAlive():
observer.join(timeout=1) | python | def watch(ctx):
"""Watch the directory for changes. Automatically run tests.
"""
vcs = ctx.obj['vcs']
event_handler = TestsEventHandler(vcs)
observer = Observer()
observer.schedule(event_handler, vcs.path, recursive=True)
observer.start()
click.echo('Watching directory `{path}`. Use ctrl-c to stop.'.format(path=vcs.path))
while observer.isAlive():
observer.join(timeout=1) | [
"def",
"watch",
"(",
"ctx",
")",
":",
"vcs",
"=",
"ctx",
".",
"obj",
"[",
"'vcs'",
"]",
"event_handler",
"=",
"TestsEventHandler",
"(",
"vcs",
")",
"observer",
"=",
"Observer",
"(",
")",
"observer",
".",
"schedule",
"(",
"event_handler",
",",
"vcs",
".",
"path",
",",
"recursive",
"=",
"True",
")",
"observer",
".",
"start",
"(",
")",
"click",
".",
"echo",
"(",
"'Watching directory `{path}`. Use ctrl-c to stop.'",
".",
"format",
"(",
"path",
"=",
"vcs",
".",
"path",
")",
")",
"while",
"observer",
".",
"isAlive",
"(",
")",
":",
"observer",
".",
"join",
"(",
"timeout",
"=",
"1",
")"
] | Watch the directory for changes. Automatically run tests. | [
"Watch",
"the",
"directory",
"for",
"changes",
".",
"Automatically",
"run",
"tests",
"."
] | 7aee8d7694fe4e2da42ce35b0f700bc840c8b95f | https://github.com/naphatkrit/easyci/blob/7aee8d7694fe4e2da42ce35b0f700bc840c8b95f/easyci/commands/watch.py#L12-L24 |
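This is the standard watchdog pattern: schedule a handler, start the observer thread, and keep the main thread joined. A minimal standalone version with a stand-in handler (the original calls `isAlive`, the older alias of `is_alive`):

```python
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        print(event.event_type, event.src_path)

observer = Observer()
observer.schedule(PrintHandler(), '.', recursive=True)
observer.start()
try:
    while observer.is_alive():
        observer.join(timeout=1)
except KeyboardInterrupt:
    observer.stop()
    observer.join()
```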
248,720 | minhhoit/yacms | yacms/conf/context_processors.py | settings | def settings(request=None):
"""
Add the settings object to the template context.
"""
from yacms.conf import settings
allowed_settings = settings.TEMPLATE_ACCESSIBLE_SETTINGS
template_settings = TemplateSettings(settings, allowed_settings)
template_settings.update(DEPRECATED)
# This is basically the same as the old ADMIN_MEDIA_PREFIX setting,
# we just use it in a few spots in the admin to optionally load a
# file from either grappelli or Django admin if grappelli isn't
# installed. We don't call it ADMIN_MEDIA_PREFIX in order to avoid
# any confusion.
admin_prefix = "grappelli/" if settings.GRAPPELLI_INSTALLED else "admin/"
template_settings["YACMS_ADMIN_PREFIX"] = admin_prefix
return {"settings": template_settings} | python | def settings(request=None):
"""
Add the settings object to the template context.
"""
from yacms.conf import settings
allowed_settings = settings.TEMPLATE_ACCESSIBLE_SETTINGS
template_settings = TemplateSettings(settings, allowed_settings)
template_settings.update(DEPRECATED)
# This is basically the same as the old ADMIN_MEDIA_PREFIX setting,
# we just use it in a few spots in the admin to optionally load a
# file from either grappelli or Django admin if grappelli isn't
# installed. We don't call it ADMIN_MEDIA_PREFIX in order to avoid
# any confusion.
admin_prefix = "grappelli/" if settings.GRAPPELLI_INSTALLED else "admin/"
template_settings["YACMS_ADMIN_PREFIX"] = admin_prefix
return {"settings": template_settings} | [
"def",
"settings",
"(",
"request",
"=",
"None",
")",
":",
"from",
"yacms",
".",
"conf",
"import",
"settings",
"allowed_settings",
"=",
"settings",
".",
"TEMPLATE_ACCESSIBLE_SETTINGS",
"template_settings",
"=",
"TemplateSettings",
"(",
"settings",
",",
"allowed_settings",
")",
"template_settings",
".",
"update",
"(",
"DEPRECATED",
")",
"# This is basically the same as the old ADMIN_MEDIA_PREFIX setting,",
"# we just use it in a few spots in the admin to optionally load a",
"# file from either grappelli or Django admin if grappelli isn't",
"# installed. We don't call it ADMIN_MEDIA_PREFIX in order to avoid",
"# any confusion.",
"admin_prefix",
"=",
"\"grappelli/\"",
"if",
"settings",
".",
"GRAPPELLI_INSTALLED",
"else",
"\"admin/\"",
"template_settings",
"[",
"\"YACMS_ADMIN_PREFIX\"",
"]",
"=",
"admin_prefix",
"return",
"{",
"\"settings\"",
":",
"template_settings",
"}"
] | Add the settings object to the template context. | [
"Add",
"the",
"settings",
"object",
"to",
"the",
"template",
"context",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/conf/context_processors.py#L51-L70 |
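A context processor like this is enabled through Django's `TEMPLATES` setting; a configuration sketch, with the dotted path inferred from the module location:

```python
# settings.py -- sketch only
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'OPTIONS': {
        'context_processors': [
            'yacms.conf.context_processors.settings',
        ],
    },
}]
```

Templates can then read any allowed value as `{{ settings.SOME_SETTING }}`.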
248,721 | jut-io/jut-python-tools | jut/commands/programs.py | list | def list(options):
"""
list programs that belong to the authenticated user
"""
configuration = config.get_default()
app_url = configuration['app_url']
if options.deployment != None:
deployment_name = options.deployment
else:
deployment_name = configuration['deployment_name']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
if options.all == True:
account_id = None
else:
account_id = accounts.get_logged_in_account_id(token_manager=token_manager,
app_url=app_url)
programs_details = programs.get_programs(deployment_name,
token_manager=token_manager,
created_by=account_id,
app_url=app_url)
account_ids = set()
for program in programs_details:
account_ids.add(program['createdBy'])
accounts_details = accounts.get_accounts(account_ids,
token_manager=token_manager,
app_url=app_url)
account_lookup = {}
for account in accounts_details['accounts']:
account_lookup[account['id']] = account
headers = ['Name', 'Last Saved', 'Created By']
table = []
for program in programs_details:
username = account_lookup[program['createdBy']]['username']
program_name = program['name']
last_edited = program['lastEdited']
table.append([program_name, last_edited, username])
if options.format == 'table':
info(tabulate.tabulate(table, headers, tablefmt='orgtbl'))
elif options.format == 'text':
info(tabulate.tabulate(table, headers, tablefmt='orgtbl', stralign='center'))
else:
raise JutException('Unsupported format "%s"' % options.format) | python | def list(options):
"""
list programs that belong to the authenticated user
"""
configuration = config.get_default()
app_url = configuration['app_url']
if options.deployment != None:
deployment_name = options.deployment
else:
deployment_name = configuration['deployment_name']
client_id = configuration['client_id']
client_secret = configuration['client_secret']
token_manager = auth.TokenManager(client_id=client_id,
client_secret=client_secret,
app_url=app_url)
if options.all == True:
account_id = None
else:
account_id = accounts.get_logged_in_account_id(token_manager=token_manager,
app_url=app_url)
programs_details = programs.get_programs(deployment_name,
token_manager=token_manager,
created_by=account_id,
app_url=app_url)
account_ids = set()
for program in programs_details:
account_ids.add(program['createdBy'])
accounts_details = accounts.get_accounts(account_ids,
token_manager=token_manager,
app_url=app_url)
account_lookup = {}
for account in accounts_details['accounts']:
account_lookup[account['id']] = account
headers = ['Name', 'Last Saved', 'Created By']
table = []
for program in programs_details:
username = account_lookup[program['createdBy']]['username']
program_name = program['name']
last_edited = program['lastEdited']
table.append([program_name, last_edited, username])
if options.format == 'table':
info(tabulate.tabulate(table, headers, tablefmt='orgtbl'))
elif options.format == 'text':
info(tabulate.tabulate(table, headers, tablefmt='orgtbl', stralign='center'))
else:
raise JutException('Unsupported format "%s"' % options.format) | [
"def",
"list",
"(",
"options",
")",
":",
"configuration",
"=",
"config",
".",
"get_default",
"(",
")",
"app_url",
"=",
"configuration",
"[",
"'app_url'",
"]",
"if",
"options",
".",
"deployment",
"!=",
"None",
":",
"deployment_name",
"=",
"options",
".",
"deployment",
"else",
":",
"deployment_name",
"=",
"configuration",
"[",
"'deployment_name'",
"]",
"client_id",
"=",
"configuration",
"[",
"'client_id'",
"]",
"client_secret",
"=",
"configuration",
"[",
"'client_secret'",
"]",
"token_manager",
"=",
"auth",
".",
"TokenManager",
"(",
"client_id",
"=",
"client_id",
",",
"client_secret",
"=",
"client_secret",
",",
"app_url",
"=",
"app_url",
")",
"if",
"options",
".",
"all",
"==",
"True",
":",
"account_id",
"=",
"None",
"else",
":",
"account_id",
"=",
"accounts",
".",
"get_logged_in_account_id",
"(",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"programs_details",
"=",
"programs",
".",
"get_programs",
"(",
"deployment_name",
",",
"token_manager",
"=",
"token_manager",
",",
"created_by",
"=",
"account_id",
",",
"app_url",
"=",
"app_url",
")",
"account_ids",
"=",
"set",
"(",
")",
"for",
"program",
"in",
"programs_details",
":",
"account_ids",
".",
"add",
"(",
"program",
"[",
"'createdBy'",
"]",
")",
"accounts_details",
"=",
"accounts",
".",
"get_accounts",
"(",
"account_ids",
",",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"account_lookup",
"=",
"{",
"}",
"for",
"account",
"in",
"accounts_details",
"[",
"'accounts'",
"]",
":",
"account_lookup",
"[",
"account",
"[",
"'id'",
"]",
"]",
"=",
"account",
"headers",
"=",
"[",
"'Name'",
",",
"'Last Saved'",
",",
"'Created By'",
"]",
"table",
"=",
"[",
"]",
"for",
"program",
"in",
"programs_details",
":",
"username",
"=",
"account_lookup",
"[",
"program",
"[",
"'createdBy'",
"]",
"]",
"[",
"'username'",
"]",
"program_name",
"=",
"program",
"[",
"'name'",
"]",
"last_edited",
"=",
"program",
"[",
"'lastEdited'",
"]",
"table",
".",
"append",
"(",
"[",
"program_name",
",",
"last_edited",
",",
"username",
"]",
")",
"if",
"options",
".",
"format",
"==",
"'table'",
":",
"info",
"(",
"tabulate",
".",
"tabulate",
"(",
"table",
",",
"headers",
",",
"tablefmt",
"=",
"'orgtbl'",
")",
")",
"elif",
"options",
".",
"format",
"==",
"'text'",
":",
"info",
"(",
"tabulate",
".",
"tabulate",
"(",
"table",
",",
"headers",
",",
"tablefmt",
"=",
"'orgtbl'",
",",
"stralign",
"=",
"'center'",
")",
")",
"else",
":",
"raise",
"JutException",
"(",
"'Unsupported format \"%s\"'",
"%",
"options",
".",
"format",
")"
] | list programs that belong to the authenticated user | [
"list",
"programs",
"that",
"belong",
"to",
"the",
"authenticated",
"user"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/commands/programs.py#L22-L82 |
248,722 | spookey/photon | photon/photon.py | Photon.m | def m(self, msg,
state=False, more=None, cmdd=None, critical=True, verbose=None):
'''
Mysterious mega method managing multiple meshed modules magically
.. note:: If this function is used, the code contains facepalms: ``m(``
* It is possible to just show a message, \
or to run a command with message.
* But it is not possible to run a command without a message, \
use the `verbose`-flag to hide your debug message.
:param msg:
Add a message. Shown depending on `verbose` (see below)
:param state:
Pass `state` down to :func:`util.system.shell_notify`
:param more:
Pass `more` down to :func:`util.system.shell_notify`
:param dict cmdd:
If given, :func:`util.system.shell_run` is launched with
its values
:param critical:
If set to ``True``: |appteardown| on failure of `cmdd` contents.
* Similar to :func:`util.system.shell_run` `critical`-flag
:param verbose:
Overrules the parent class's `verbose`-flag.
* If left to ``None``, the verbose value Photon \
was started with is used
* Messages are shown/hidden if explicitly set to ``True``/``False``
:returns:
A dictionary specifying the following:
* 'more':
`more` if it is not a dictionary otherwise \
it gets merged in if `more` is specified
* The output of :func:`util.system.shell_run` gets merged in \
if `cmdd` is specified
* 'failed': ``True`` if command failed
:func:`util.system.shell_notify` is used with this dictionary
to pipe its output into :func:`meta.Meta.log` before returning.
'''
if verbose is None:
verbose = self.__verbose
res = dict()
if more:
res.update(more if isinstance(more, dict) else dict(more=more))
if cmdd and isinstance(cmdd, dict) and cmdd.get('cmd'):
res.update(shell_run(
cmdd.get('cmd'),
cin=cmdd.get('cin'),
cwd=cmdd.get('cwd'),
timeout=cmdd.get('timeout', 120),
critical=False,
verbose=cmdd.get('verbose', verbose)
))
if res.get('returncode', -1) != 0:
res.update(dict(failed=True))
if state or critical and res.get('failed'):
self.meta.log = dict(message=msg, more=res, verbose=verbose)
shell_notify(msg, more=res, state=True)
self.meta.log = shell_notify(msg, more=res,
state=state, verbose=verbose)
return res | python | def m(self, msg,
state=False, more=None, cmdd=None, critical=True, verbose=None):
'''
Mysterious mega method managing multiple meshed modules magically
.. note:: If this function is used, the code contains facepalms: ``m(``
* It is possible to just show a message, \
or to run a command with message.
* But it is not possible to run a command without a message, \
use the `verbose`-flag to hide your debug message.
:param msg:
Add a message. Shown depending on `verbose` (see below)
:param state:
Pass `state` down to :func:`util.system.shell_notify`
:param more:
Pass `more` down to :func:`util.system.shell_notify`
:param dict cmdd:
If given, :func:`util.system.shell_run` is launched with
its values
:param critical:
If set to ``True``: |appteardown| on failure of `cmdd` contents.
* Similar to :func:`util.system.shell_run` `critical`-flag
:param verbose:
Overrules the parent class's `verbose`-flag.
* If left to ``None``, the verbose value Photon \
was started with is used
* Messages are shown/hidden if explicitly set to ``True``/``False``
:returns:
A dictionary specifying the following:
* 'more':
`more` if it is not a dictionary otherwise \
it gets merged in if `more` is specified
* The output of :func:`util.system.shell_run` gets merged in \
if `cmdd` is specified
* 'failed': ``True`` if command failed
:func:`util.system.shell_notify` is used with this dictionary
to pipe its output into :func:`meta.Meta.log` before returning.
'''
if verbose is None:
verbose = self.__verbose
res = dict()
if more:
res.update(more if isinstance(more, dict) else dict(more=more))
if cmdd and isinstance(cmdd, dict) and cmdd.get('cmd'):
res.update(shell_run(
cmdd.get('cmd'),
cin=cmdd.get('cin'),
cwd=cmdd.get('cwd'),
timeout=cmdd.get('timeout', 120),
critical=False,
verbose=cmdd.get('verbose', verbose)
))
if res.get('returncode', -1) != 0:
res.update(dict(failed=True))
if state or critical and res.get('failed'):
self.meta.log = dict(message=msg, more=res, verbose=verbose)
shell_notify(msg, more=res, state=True)
self.meta.log = shell_notify(msg, more=res,
state=state, verbose=verbose)
return res | [
"def",
"m",
"(",
"self",
",",
"msg",
",",
"state",
"=",
"False",
",",
"more",
"=",
"None",
",",
"cmdd",
"=",
"None",
",",
"critical",
"=",
"True",
",",
"verbose",
"=",
"None",
")",
":",
"if",
"verbose",
"is",
"None",
":",
"verbose",
"=",
"self",
".",
"__verbose",
"res",
"=",
"dict",
"(",
")",
"if",
"more",
":",
"res",
".",
"update",
"(",
"more",
"if",
"isinstance",
"(",
"more",
",",
"dict",
")",
"else",
"dict",
"(",
"more",
"=",
"more",
")",
")",
"if",
"cmdd",
"and",
"isinstance",
"(",
"cmdd",
",",
"dict",
")",
"and",
"cmdd",
".",
"get",
"(",
"'cmd'",
")",
":",
"res",
".",
"update",
"(",
"shell_run",
"(",
"cmdd",
".",
"get",
"(",
"'cmd'",
")",
",",
"cin",
"=",
"cmdd",
".",
"get",
"(",
"'cin'",
")",
",",
"cwd",
"=",
"cmdd",
".",
"get",
"(",
"'cwd'",
")",
",",
"timeout",
"=",
"cmdd",
".",
"get",
"(",
"'timeout'",
",",
"120",
")",
",",
"critical",
"=",
"False",
",",
"verbose",
"=",
"cmdd",
".",
"get",
"(",
"'verbose'",
",",
"verbose",
")",
")",
")",
"if",
"res",
".",
"get",
"(",
"'returncode'",
",",
"-",
"1",
")",
"!=",
"0",
":",
"res",
".",
"update",
"(",
"dict",
"(",
"failed",
"=",
"True",
")",
")",
"if",
"state",
"or",
"critical",
"and",
"res",
".",
"get",
"(",
"'failed'",
")",
":",
"self",
".",
"meta",
".",
"log",
"=",
"dict",
"(",
"message",
"=",
"msg",
",",
"more",
"=",
"res",
",",
"verbose",
"=",
"verbose",
")",
"shell_notify",
"(",
"msg",
",",
"more",
"=",
"res",
",",
"state",
"=",
"True",
")",
"self",
".",
"meta",
".",
"log",
"=",
"shell_notify",
"(",
"msg",
",",
"more",
"=",
"res",
",",
"state",
"=",
"state",
",",
"verbose",
"=",
"verbose",
")",
"return",
"res"
] | Mysterious mega method managing multiple meshed modules magically
.. note:: If this function is used, the code contains facepalms: ``m(``
* It is possible to just show a message, \
    or to run a command with a message.
* But it is not possible to run a command without a message, \
use the `verbose`-flag to hide your debug message.
:param msg:
Add a message. Shown depending on `verbose` (see below)
:param state:
Pass `state` down to :func:`util.system.shell_notify`
:param more:
Pass `more` down to :func:`util.system.shell_notify`
:param dict cmdd:
If given, :func:`util.system.shell_run` is launched with
    its values
:param critical:
If set to ``True``: |appteardown| on failure of `cmdd` contents.
* Similar to :func:`util.system.shell_run` `critical`-flag
:param verbose:
    Overrules the parent class's `verbose`-flag.
* If left to ``None``, the verbose value Photon \
was started with is used
* Messages are shown/hidden if explicitly set to ``True``/``False``
:returns:
    A dictionary specifying the following:
    * 'more':
        `more` if it is not a dictionary, otherwise \
it gets merged in if `more` is specified
* The output of :func:`util.system.shell_run` gets merged in \
if `cmdd` is specified
* 'failed': ``True`` if command failed
:func:`util.system.shell_notify` is used with this dictionary
    to pipe its output into :func:`meta.Meta.log` before returning. | [
"Mysterious",
"mega",
"method",
"managing",
"multiple",
"meshed",
"modules",
"magically"
] | 57212a26ce713ab7723910ee49e3d0ba1697799f | https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/photon.py#L91-L167 |
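Usage sketch for the `m` method above (a hedged example, not from the source repo: it assumes the photon package is importable and that `Photon()` accepts its defaults):

from photon import Photon

p = Photon()                         # constructor defaults assumed
p.m('just a message')                # message only
res = p.m('listing /tmp',            # message plus a command
          cmdd=dict(cmd='ls /tmp'),
          critical=False)            # don't tear down on failure
print(res.get('failed', False))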
248,723 | spookey/photon | photon/photon.py | Photon.s2m | def s2m(self):
'''
Imports settings to meta
'''
m = '%s settings' % (IDENT)
self.meta.load(m, 'import %s' % (m), mdict=self.settings.get) | python | def s2m(self):
'''
Imports settings to meta
'''
m = '%s settings' % (IDENT)
self.meta.load(m, 'import %s' % (m), mdict=self.settings.get) | [
"def",
"s2m",
"(",
"self",
")",
":",
"m",
"=",
"'%s settings'",
"%",
"(",
"IDENT",
")",
"self",
".",
"meta",
".",
"load",
"(",
"m",
",",
"'import %s'",
"%",
"(",
"m",
")",
",",
"mdict",
"=",
"self",
".",
"settings",
".",
"get",
")"
] | Imports settings to meta | [
"Imports",
"settings",
"to",
"meta"
] | 57212a26ce713ab7723910ee49e3d0ba1697799f | https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/photon.py#L170-L176 |
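A brief hedged sketch of `s2m` (same assumption about constructing Photon as above):

from photon import Photon

p = Photon()   # constructor defaults assumed
p.s2m()        # copies the current settings into the meta log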
248,724 | nickmilon/Hellas | Hellas/Pella.py | file_to_base64 | def file_to_base64(path_or_obj, max_mb=None):
"""converts contents of a file to base64 encoding
    :param str_or_object path_or_obj: full pathname string for a file or a file-like object that supports read
    :param int max_mb: maximum number of MegaBytes to accept
:raises ErrorFileTooBig: if file contents > max_mb (see :class:`ErrorFileTooBig`)
:raises IOError: if file path can't be found (Also possible other exceptions depending on file_object)
"""
if not hasattr(path_or_obj, 'read'):
rt = read_file(path_or_obj)
else:
rt = path_or_obj.read()
if max_mb:
        len_mb = len(rt) / (1024.0 * 1024)
        if len_mb > max_mb:
            raise ErrorFileTooBig("File is too big ({:.2f} MBytes)".format(len_mb))
return b64encode(rt) | python | def file_to_base64(path_or_obj, max_mb=None):
"""converts contents of a file to base64 encoding
    :param str_or_object path_or_obj: full pathname string for a file or a file-like object that supports read
    :param int max_mb: maximum number of MegaBytes to accept
:raises ErrorFileTooBig: if file contents > max_mb (see :class:`ErrorFileTooBig`)
:raises IOError: if file path can't be found (Also possible other exceptions depending on file_object)
"""
if not hasattr(path_or_obj, 'read'):
rt = read_file(path_or_obj)
else:
rt = path_or_obj.read()
if max_mb:
        len_mb = len(rt) / (1024.0 * 1024)
        if len_mb > max_mb:
            raise ErrorFileTooBig("File is too big ({:.2f} MBytes)".format(len_mb))
return b64encode(rt) | [
"def",
"file_to_base64",
"(",
"path_or_obj",
",",
"max_mb",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"path_or_obj",
",",
"'read'",
")",
":",
"rt",
"=",
"read_file",
"(",
"path_or_obj",
")",
"else",
":",
"rt",
"=",
"path_or_obj",
".",
"read",
"(",
")",
"if",
"max_mb",
":",
"len_mb",
"=",
"len",
"(",
"rt",
")",
"/",
"(",
"10024.0",
"*",
"1000",
")",
"if",
"len_mb",
">",
"max_mb",
":",
"raise",
"ErrorFileTooBig",
"(",
"\"File is too big ({.2f} MBytes)\"",
"(",
"len_mb",
")",
")",
"return",
"b64encode",
"(",
"rt",
")"
] | converts contents of a file to base64 encoding
:param str_or_object path_or_obj: full pathname string for a file or a file-like object that supports read
:param int max_mb: maximum number of MegaBytes to accept
:raises ErrorFileTooBig: if file contents > max_mb (see :class:`ErrorFileTooBig`)
:raises IOError: if file path can't be found (Also possible other exceptions depending on file_object) | [
"converts",
"contents",
"of",
"a",
"file",
"to",
"base64",
"encoding"
] | 542e4778692fbec90753942946f20100412ec9ee | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Pella.py#L27-L45 |
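Minimal usage sketch (assumes the Hellas package is importable and that read_file returns bytes; the temp file exists only for the demo):

import tempfile
from Hellas.Pella import file_to_base64

with tempfile.NamedTemporaryFile(delete=False) as fp:
    fp.write(b'hello world')         # tiny payload, well under max_mb
print(file_to_base64(fp.name, max_mb=1))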
248,725 | nickmilon/Hellas | Hellas/Pella.py | dict_clip | def dict_clip(a_dict, inlude_keys_lst=[]):
"""returns a new dict with keys not in included in inlude_keys_lst clipped off"""
return dict([[i[0], i[1]] for i in list(a_dict.items()) if i[0] in inlude_keys_lst]) | python | def dict_clip(a_dict, inlude_keys_lst=[]):
"""returns a new dict with keys not in included in inlude_keys_lst clipped off"""
return dict([[i[0], i[1]] for i in list(a_dict.items()) if i[0] in inlude_keys_lst]) | [
"def",
"dict_clip",
"(",
"a_dict",
",",
"inlude_keys_lst",
"=",
"[",
"]",
")",
":",
"return",
"dict",
"(",
"[",
"[",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
"]",
"for",
"i",
"in",
"list",
"(",
"a_dict",
".",
"items",
"(",
")",
")",
"if",
"i",
"[",
"0",
"]",
"in",
"inlude_keys_lst",
"]",
")"
] | returns a new dict with keys not included in inlude_keys_lst clipped off | [
"returns",
"a",
"new",
"dict",
"with",
"keys",
"not",
"in",
"included",
"in",
"inlude_keys_lst",
"clipped",
"off"
] | 542e4778692fbec90753942946f20100412ec9ee | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Pella.py#L63-L65 |
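Usage sketch (assuming Hellas is importable):

from Hellas.Pella import dict_clip

d = {'a': 1, 'b': 2, 'c': 3}
print(dict_clip(d, ['a', 'c']))      # {'a': 1, 'c': 3}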
248,726 | nickmilon/Hellas | Hellas/Pella.py | list_pp | def list_pp(ll, separator='|', header_line=True, autonumber=True):
"""pretty print list of lists ll"""
if autonumber:
for cnt, i in enumerate(ll):
i.insert(0, cnt if cnt > 0 or not header_line else '#')
def lenlst(l):
return [len(str(i)) for i in l]
lst_len = [lenlst(i) for i in ll]
lst_rot = zip(*lst_len[::-1])
lst_len = [max(i) for i in lst_rot]
frmt = separator + separator.join(["{!s:"+str(i)+"}" for i in lst_len]) + separator
if header_line:
header_line = '-' * len(frmt.format(*ll[0]))
for cnt, l in enumerate(ll):
if cnt < 2 and header_line:
print(header_line)
print(frmt.format(*l))
if header_line:
print(header_line)
return lst_len | python | def list_pp(ll, separator='|', header_line=True, autonumber=True):
"""pretty print list of lists ll"""
if autonumber:
for cnt, i in enumerate(ll):
i.insert(0, cnt if cnt > 0 or not header_line else '#')
def lenlst(l):
return [len(str(i)) for i in l]
lst_len = [lenlst(i) for i in ll]
lst_rot = zip(*lst_len[::-1])
lst_len = [max(i) for i in lst_rot]
frmt = separator + separator.join(["{!s:"+str(i)+"}" for i in lst_len]) + separator
if header_line:
header_line = '-' * len(frmt.format(*ll[0]))
for cnt, l in enumerate(ll):
if cnt < 2 and header_line:
print(header_line)
print(frmt.format(*l))
if header_line:
print(header_line)
return lst_len | [
"def",
"list_pp",
"(",
"ll",
",",
"separator",
"=",
"'|'",
",",
"header_line",
"=",
"True",
",",
"autonumber",
"=",
"True",
")",
":",
"if",
"autonumber",
":",
"for",
"cnt",
",",
"i",
"in",
"enumerate",
"(",
"ll",
")",
":",
"i",
".",
"insert",
"(",
"0",
",",
"cnt",
"if",
"cnt",
">",
"0",
"or",
"not",
"header_line",
"else",
"'#'",
")",
"def",
"lenlst",
"(",
"l",
")",
":",
"return",
"[",
"len",
"(",
"str",
"(",
"i",
")",
")",
"for",
"i",
"in",
"l",
"]",
"lst_len",
"=",
"[",
"lenlst",
"(",
"i",
")",
"for",
"i",
"in",
"ll",
"]",
"lst_rot",
"=",
"zip",
"(",
"*",
"lst_len",
"[",
":",
":",
"-",
"1",
"]",
")",
"lst_len",
"=",
"[",
"max",
"(",
"i",
")",
"for",
"i",
"in",
"lst_rot",
"]",
"frmt",
"=",
"separator",
"+",
"separator",
".",
"join",
"(",
"[",
"\"{!s:\"",
"+",
"str",
"(",
"i",
")",
"+",
"\"}\"",
"for",
"i",
"in",
"lst_len",
"]",
")",
"+",
"separator",
"if",
"header_line",
":",
"header_line",
"=",
"'-'",
"*",
"len",
"(",
"frmt",
".",
"format",
"(",
"*",
"ll",
"[",
"0",
"]",
")",
")",
"for",
"cnt",
",",
"l",
"in",
"enumerate",
"(",
"ll",
")",
":",
"if",
"cnt",
"<",
"2",
"and",
"header_line",
":",
"print",
"(",
"header_line",
")",
"print",
"(",
"frmt",
".",
"format",
"(",
"*",
"l",
")",
")",
"if",
"header_line",
":",
"print",
"(",
"header_line",
")",
"return",
"lst_len"
] | pretty print list of lists ll | [
"pretty",
"print",
"list",
"of",
"lists",
"ll"
] | 542e4778692fbec90753942946f20100412ec9ee | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Pella.py#L74-L95 |
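Usage sketch; note that with the default autonumber=True a numbering column is inserted into the rows in place:

from Hellas.Pella import list_pp

rows = [['fruit', 'qty'], ['apples', 10], ['pears', 2]]
widths = list_pp(rows)               # prints an aligned, autonumbered table
print(widths)                        # per-column widths, e.g. [1, 6, 3]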
248,727 | nickmilon/Hellas | Hellas/Pella.py | signal_terminate | def signal_terminate(on_terminate):
"""a common case program termination signal"""
for i in [signal.SIGINT, signal.SIGQUIT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGTERM]:
signal.signal(i, on_terminate) | python | def signal_terminate(on_terminate):
"""a common case program termination signal"""
for i in [signal.SIGINT, signal.SIGQUIT, signal.SIGUSR1, signal.SIGUSR2, signal.SIGTERM]:
signal.signal(i, on_terminate) | [
"def",
"signal_terminate",
"(",
"on_terminate",
")",
":",
"for",
"i",
"in",
"[",
"signal",
".",
"SIGINT",
",",
"signal",
".",
"SIGQUIT",
",",
"signal",
".",
"SIGUSR1",
",",
"signal",
".",
"SIGUSR2",
",",
"signal",
".",
"SIGTERM",
"]",
":",
"signal",
".",
"signal",
"(",
"i",
",",
"on_terminate",
")"
] | a common case program termination signal | [
"a",
"common",
"case",
"program",
"termination",
"signal"
] | 542e4778692fbec90753942946f20100412ec9ee | https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Pella.py#L99-L102 |
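Usage sketch (POSIX only, since SIGQUIT has no Windows equivalent):

from Hellas.Pella import signal_terminate

def on_terminate(signum, frame):     # standard signal-handler signature
    print('caught signal', signum)

signal_terminate(on_terminate)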
248,728 | polysquare/jobstamps | jobstamps/jobstamp.py | _safe_mkdir | def _safe_mkdir(directory):
"""Create a directory, ignoring errors if it already exists."""
try:
os.makedirs(directory)
except OSError as error:
if error.errno != errno.EEXIST:
raise error | python | def _safe_mkdir(directory):
"""Create a directory, ignoring errors if it already exists."""
try:
os.makedirs(directory)
except OSError as error:
if error.errno != errno.EEXIST:
raise error | [
"def",
"_safe_mkdir",
"(",
"directory",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"directory",
")",
"except",
"OSError",
"as",
"error",
":",
"if",
"error",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"error"
] | Create a directory, ignoring errors if it already exists. | [
"Create",
"a",
"directory",
"ignoring",
"errors",
"if",
"it",
"already",
"exists",
"."
] | 49b4dec93b38c9db55643226a9788c675a53ef25 | https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L23-L29 |
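Usage sketch:

from jobstamps.jobstamp import _safe_mkdir

_safe_mkdir('/tmp/jobstamps-demo')
_safe_mkdir('/tmp/jobstamps-demo')   # second call is a no-op instead of raising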
248,729 | polysquare/jobstamps | jobstamps/jobstamp.py | _stamp_and_update_hook | def _stamp_and_update_hook(method, # suppress(too-many-arguments)
dependencies,
stampfile,
func,
*args,
**kwargs):
"""Write stamp and call update_stampfile_hook on method."""
result = _stamp(stampfile, func, *args, **kwargs)
method.update_stampfile_hook(dependencies)
return result | python | def _stamp_and_update_hook(method, # suppress(too-many-arguments)
dependencies,
stampfile,
func,
*args,
**kwargs):
"""Write stamp and call update_stampfile_hook on method."""
result = _stamp(stampfile, func, *args, **kwargs)
method.update_stampfile_hook(dependencies)
return result | [
"def",
"_stamp_and_update_hook",
"(",
"method",
",",
"# suppress(too-many-arguments)",
"dependencies",
",",
"stampfile",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"_stamp",
"(",
"stampfile",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"method",
".",
"update_stampfile_hook",
"(",
"dependencies",
")",
"return",
"result"
] | Write stamp and call update_stampfile_hook on method. | [
"Write",
"stamp",
"and",
"call",
"update_stampfile_hook",
"on",
"method",
"."
] | 49b4dec93b38c9db55643226a9788c675a53ef25 | https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L42-L51 |
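A hedged, self-contained sketch of the call contract; the stub stands in for a real method object such as HashMethod, and `_stamp` is assumed to run the callable and record its result in the stamp file:

from jobstamps.jobstamp import _stamp_and_update_hook

class StubMethod(object):            # stands in for HashMethod/MTimeMethod
    def update_stampfile_hook(self, dependencies):
        print('hook called with', dependencies)

result = _stamp_and_update_hook(StubMethod(), ['dep.txt'],
                                '/tmp/demo.stamp', lambda: 42)
print(result)                        # 42, assuming _stamp passes it through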
248,730 | polysquare/jobstamps | jobstamps/jobstamp.py | _sha1_for_file | def _sha1_for_file(filename):
"""Return sha1 for contents of filename."""
with open(filename, "rb") as fileobj:
contents = fileobj.read()
return hashlib.sha1(contents).hexdigest() | python | def _sha1_for_file(filename):
"""Return sha1 for contents of filename."""
with open(filename, "rb") as fileobj:
contents = fileobj.read()
return hashlib.sha1(contents).hexdigest() | [
"def",
"_sha1_for_file",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"fileobj",
":",
"contents",
"=",
"fileobj",
".",
"read",
"(",
")",
"return",
"hashlib",
".",
"sha1",
"(",
"contents",
")",
".",
"hexdigest",
"(",
")"
] | Return sha1 for contents of filename. | [
"Return",
"sha1",
"for",
"contents",
"of",
"filename",
"."
] | 49b4dec93b38c9db55643226a9788c675a53ef25 | https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L54-L58 |
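Usage sketch:

from jobstamps.jobstamp import _sha1_for_file

with open('/tmp/demo.txt', 'wb') as fileobj:
    fileobj.write(b'contents')
print(_sha1_for_file('/tmp/demo.txt'))   # 40-character hex digest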
248,731 | polysquare/jobstamps | jobstamps/jobstamp.py | HashMethod.check_dependency | def check_dependency(self, dependency_path):
"""Check if mtime of dependency_path is greater than stored mtime."""
stored_hash = self._stamp_file_hashes.get(dependency_path)
# This file was newly added, or we don't have a file
# with stored hashes yet. Assume out of date.
if not stored_hash:
return False
return stored_hash == _sha1_for_file(dependency_path) | python | def check_dependency(self, dependency_path):
"""Check if mtime of dependency_path is greater than stored mtime."""
stored_hash = self._stamp_file_hashes.get(dependency_path)
# This file was newly added, or we don't have a file
# with stored hashes yet. Assume out of date.
if not stored_hash:
return False
return stored_hash == _sha1_for_file(dependency_path) | [
"def",
"check_dependency",
"(",
"self",
",",
"dependency_path",
")",
":",
"stored_hash",
"=",
"self",
".",
"_stamp_file_hashes",
".",
"get",
"(",
"dependency_path",
")",
"# This file was newly added, or we don't have a file",
"# with stored hashes yet. Assume out of date.",
"if",
"not",
"stored_hash",
":",
"return",
"False",
"return",
"stored_hash",
"==",
"_sha1_for_file",
"(",
"dependency_path",
")"
] | Check if the stored hash of dependency_path matches its current hash. | [
"Check",
"if",
"mtime",
"of",
"dependency_path",
"is",
"greater",
"than",
"stored",
"mtime",
"."
] | 49b4dec93b38c9db55643226a9788c675a53ef25 | https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L96-L105 |
248,732 | polysquare/jobstamps | jobstamps/jobstamp.py | HashMethod.update_stampfile_hook | def update_stampfile_hook(self, dependencies): # suppress(no-self-use)
"""Loop over all dependencies and store hash for each of them."""
hashes = {d: _sha1_for_file(d) for d in dependencies
if os.path.exists(d)}
with open(self._stamp_file_hashes_path, "wb") as hashes_file:
hashes_file.write(json.dumps(hashes).encode("utf-8")) | python | def update_stampfile_hook(self, dependencies): # suppress(no-self-use)
"""Loop over all dependencies and store hash for each of them."""
hashes = {d: _sha1_for_file(d) for d in dependencies
if os.path.exists(d)}
with open(self._stamp_file_hashes_path, "wb") as hashes_file:
hashes_file.write(json.dumps(hashes).encode("utf-8")) | [
"def",
"update_stampfile_hook",
"(",
"self",
",",
"dependencies",
")",
":",
"# suppress(no-self-use)",
"hashes",
"=",
"{",
"d",
":",
"_sha1_for_file",
"(",
"d",
")",
"for",
"d",
"in",
"dependencies",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"d",
")",
"}",
"with",
"open",
"(",
"self",
".",
"_stamp_file_hashes_path",
",",
"\"wb\"",
")",
"as",
"hashes_file",
":",
"hashes_file",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"hashes",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")"
] | Loop over all dependencies and store hash for each of them. | [
"Loop",
"over",
"all",
"dependencies",
"and",
"store",
"hash",
"for",
"each",
"of",
"them",
"."
] | 49b4dec93b38c9db55643226a9788c675a53ef25 | https://github.com/polysquare/jobstamps/blob/49b4dec93b38c9db55643226a9788c675a53ef25/jobstamps/jobstamp.py#L107-L112 |
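A hedged sketch exercising both HashMethod methods above; the constructor argument (a stamp-file path) is an assumption, as is the need for a fresh instance to re-read the stored hashes:

from jobstamps.jobstamp import HashMethod

with open('/tmp/demo.txt', 'wb') as fp:
    fp.write(b'contents')
method = HashMethod('/tmp/demo.stamp')        # constructor signature assumed
method.update_stampfile_hook(['/tmp/demo.txt'])
# a fresh instance re-reads the hashes written above:
print(HashMethod('/tmp/demo.stamp').check_dependency('/tmp/demo.txt'))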
248,733 | markomanninen/abnum | abnum/main.py | Abnum.unicode_value | def unicode_value(self, string):
"""
String argument must be in unicode format.
"""
result = 0
# don't accept strings that contain numbers
if self.regex_has_numbers.search(string):
raise AbnumException(error_msg % string)
else:
num_str = self.regex_values.sub(lambda x: '%s ' % self.values[x.group()],
string)
        # don't accept strings that contain letters which haven't been converted to numbers
try:
result = sum([int(i) for i in num_str.split()])
except Exception as e:
raise AbnumException(error_msg % string)
return result | python | def unicode_value(self, string):
"""
String argument must be in unicode format.
"""
result = 0
# don't accept strings that contain numbers
if self.regex_has_numbers.search(string):
raise AbnumException(error_msg % string)
else:
num_str = self.regex_values.sub(lambda x: '%s ' % self.values[x.group()],
string)
        # don't accept strings that contain letters which haven't been converted to numbers
try:
result = sum([int(i) for i in num_str.split()])
except Exception as e:
raise AbnumException(error_msg % string)
return result | [
"def",
"unicode_value",
"(",
"self",
",",
"string",
")",
":",
"result",
"=",
"0",
"# don't accept strings that contain numbers",
"if",
"self",
".",
"regex_has_numbers",
".",
"search",
"(",
"string",
")",
":",
"raise",
"AbnumException",
"(",
"error_msg",
"%",
"string",
")",
"else",
":",
"num_str",
"=",
"self",
".",
"regex_values",
".",
"sub",
"(",
"lambda",
"x",
":",
"'%s '",
"%",
"self",
".",
"values",
"[",
"x",
".",
"group",
"(",
")",
"]",
",",
"string",
")",
"# don't accept strings, that contains letters which haven't been be converted to numbers",
"try",
":",
"result",
"=",
"sum",
"(",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"num_str",
".",
"split",
"(",
")",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"AbnumException",
"(",
"error_msg",
"%",
"string",
")",
"return",
"result"
] | String argument must be in unicode format. | [
"String",
"argument",
"must",
"be",
"in",
"unicode",
"format",
"."
] | 9bfc8f06f34d9a51aab038638f87e2bb5f9f4c99 | https://github.com/markomanninen/abnum/blob/9bfc8f06f34d9a51aab038638f87e2bb5f9f4c99/abnum/main.py#L65-L81 |
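A heavily hedged sketch; the Abnum constructor argument is hypothetical (the excerpt only shows unicode_value) and the letter values assume standard Greek isopsephy:

from abnum import Abnum

g = Abnum('greek')                   # hypothetical constructor argument
print(g.unicode_value(u'αβγ'))       # 1 + 2 + 3 = 6 under Greek isopsephy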
248,734 | ionata/dj-core | dj_core/utils.py | import_from_string | def import_from_string(value):
"""Copy of rest_framework.settings.import_from_string"""
value = value.replace('-', '_')
try:
module_path, class_name = value.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
except (ImportError, AttributeError) as ex:
raise ImportError("Could not import '{}'. {}: {}.".format(
value, ex.__class__.__name__, ex)) | python | def import_from_string(value):
"""Copy of rest_framework.settings.import_from_string"""
value = value.replace('-', '_')
try:
module_path, class_name = value.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
except (ImportError, AttributeError) as ex:
raise ImportError("Could not import '{}'. {}: {}.".format(
value, ex.__class__.__name__, ex)) | [
"def",
"import_from_string",
"(",
"value",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"try",
":",
"module_path",
",",
"class_name",
"=",
"value",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"module",
"=",
"import_module",
"(",
"module_path",
")",
"return",
"getattr",
"(",
"module",
",",
"class_name",
")",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
"as",
"ex",
":",
"raise",
"ImportError",
"(",
"\"Could not import '{}'. {}: {}.\"",
".",
"format",
"(",
"value",
",",
"ex",
".",
"__class__",
".",
"__name__",
",",
"ex",
")",
")"
] | Copy of rest_framework.settings.import_from_string | [
"Copy",
"of",
"rest_framework",
".",
"settings",
".",
"import_from_string"
] | 7a3139fc433c17f27e7dc2cee8775db21e0b5c89 | https://github.com/ionata/dj-core/blob/7a3139fc433c17f27e7dc2cee8775db21e0b5c89/dj_core/utils.py#L70-L79 |
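Usage sketch; the dotted path resolves to any importable object, and hyphens in the path are normalized to underscores first:

from dj_core.utils import import_from_string

OrderedDict = import_from_string('collections.OrderedDict')
print(OrderedDict([('a', 1)]))       # OrderedDict([('a', 1)])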
248,735 | gebn/nibble | nibble/__main__.py | _parse_args | def _parse_args(args):
"""
Interpret command line arguments.
:param args: `sys.argv`
:return: The populated argparse namespace.
"""
parser = argparse.ArgumentParser(prog='nibble',
description='Speed, distance and time '
'calculations around '
'quantities of digital '
'information.')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s ' + nibble.__version__)
parser.add_argument('-v', '--verbosity',
help='increase output verbosity',
action='count',
default=0)
parser.add_argument('expression',
type=util.decode_cli_arg,
nargs='+',
help='the calculation to execute')
return parser.parse_args(args[1:]) | python | def _parse_args(args):
"""
Interpret command line arguments.
:param args: `sys.argv`
:return: The populated argparse namespace.
"""
parser = argparse.ArgumentParser(prog='nibble',
description='Speed, distance and time '
'calculations around '
'quantities of digital '
'information.')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s ' + nibble.__version__)
parser.add_argument('-v', '--verbosity',
help='increase output verbosity',
action='count',
default=0)
parser.add_argument('expression',
type=util.decode_cli_arg,
nargs='+',
help='the calculation to execute')
return parser.parse_args(args[1:]) | [
"def",
"_parse_args",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'nibble'",
",",
"description",
"=",
"'Speed, distance and time '",
"'calculations around '",
"'quantities of digital '",
"'information.'",
")",
"parser",
".",
"add_argument",
"(",
"'-V'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'%(prog)s '",
"+",
"nibble",
".",
"__version__",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbosity'",
",",
"help",
"=",
"'increase output verbosity'",
",",
"action",
"=",
"'count'",
",",
"default",
"=",
"0",
")",
"parser",
".",
"add_argument",
"(",
"'expression'",
",",
"type",
"=",
"util",
".",
"decode_cli_arg",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'the calculation to execute'",
")",
"return",
"parser",
".",
"parse_args",
"(",
"args",
"[",
"1",
":",
"]",
")"
] | Interpret command line arguments.
:param args: `sys.argv`
:return: The populated argparse namespace. | [
"Interpret",
"command",
"line",
"arguments",
"."
] | e82a2c43509ed38f3d039040591cc630fa676cb0 | https://github.com/gebn/nibble/blob/e82a2c43509ed38f3d039040591cc630fa676cb0/nibble/__main__.py#L14-L38 |
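A hedged sketch; argv[0] is skipped, and the expression shown is only illustrative (no grammar is validated at this stage):

from nibble.__main__ import _parse_args

ns = _parse_args(['nibble', '-vv', '10GiB', 'at', '100Mbps'])
print(ns.verbosity)                  # 2
print(ns.expression)                 # decoded expression tokens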
248,736 | gebn/nibble | nibble/__main__.py | main | def main(args):
"""
Nibble's entry point.
:param args: Command-line arguments, with the program in position 0.
"""
args = _parse_args(args)
# sort out logging output and level
level = util.log_level_from_vebosity(args.verbosity)
root = logging.getLogger()
root.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
root.addHandler(handler)
logger.debug(args)
expression = ' '.join(args.expression)
try:
print(Parser().parse(expression))
except (LexingError, ParsingError) as e:
util.print_error(e)
return 1
return 0 | python | def main(args):
"""
Nibble's entry point.
:param args: Command-line arguments, with the program in position 0.
"""
args = _parse_args(args)
# sort out logging output and level
level = util.log_level_from_vebosity(args.verbosity)
root = logging.getLogger()
root.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
root.addHandler(handler)
logger.debug(args)
expression = ' '.join(args.expression)
try:
print(Parser().parse(expression))
except (LexingError, ParsingError) as e:
util.print_error(e)
return 1
return 0 | [
"def",
"main",
"(",
"args",
")",
":",
"args",
"=",
"_parse_args",
"(",
"args",
")",
"# sort out logging output and level",
"level",
"=",
"util",
".",
"log_level_from_vebosity",
"(",
"args",
".",
"verbosity",
")",
"root",
"=",
"logging",
".",
"getLogger",
"(",
")",
"root",
".",
"setLevel",
"(",
"level",
")",
"handler",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stdout",
")",
"handler",
".",
"setLevel",
"(",
"level",
")",
"handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'%(levelname)s %(message)s'",
")",
")",
"root",
".",
"addHandler",
"(",
"handler",
")",
"logger",
".",
"debug",
"(",
"args",
")",
"expression",
"=",
"' '",
".",
"join",
"(",
"args",
".",
"expression",
")",
"try",
":",
"print",
"(",
"Parser",
"(",
")",
".",
"parse",
"(",
"expression",
")",
")",
"except",
"(",
"LexingError",
",",
"ParsingError",
")",
"as",
"e",
":",
"util",
".",
"print_error",
"(",
"e",
")",
"return",
"1",
"return",
"0"
] | Nibble's entry point.
:param args: Command-line arguments, with the program in position 0. | [
"Nibble",
"s",
"entry",
"point",
"."
] | e82a2c43509ed38f3d039040591cc630fa676cb0 | https://github.com/gebn/nibble/blob/e82a2c43509ed38f3d039040591cc630fa676cb0/nibble/__main__.py#L41-L68 |
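A hedged sketch of the entry point; the expression syntax is an assumption, only the exit-code behaviour is taken from the code above:

import sys
from nibble.__main__ import main

# 0 on success, 1 on a lexing/parsing error
sys.exit(main(['nibble', '10GiB', 'at', '100Mbps']))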
248,737 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/action_plugins/normal.py | ActionModule.run | def run(self, conn, tmp, module_name, module_args, inject):
''' transfer & execute a module that is not 'copy' or 'template' '''
# shell and command are the same module
if module_name == 'shell':
module_name = 'command'
module_args += " #USE_SHELL"
vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
return self.runner._execute_module(conn, tmp, module_name, module_args, inject=inject) | python | def run(self, conn, tmp, module_name, module_args, inject):
''' transfer & execute a module that is not 'copy' or 'template' '''
# shell and command are the same module
if module_name == 'shell':
module_name = 'command'
module_args += " #USE_SHELL"
vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
return self.runner._execute_module(conn, tmp, module_name, module_args, inject=inject) | [
"def",
"run",
"(",
"self",
",",
"conn",
",",
"tmp",
",",
"module_name",
",",
"module_args",
",",
"inject",
")",
":",
"# shell and command are the same module",
"if",
"module_name",
"==",
"'shell'",
":",
"module_name",
"=",
"'command'",
"module_args",
"+=",
"\" #USE_SHELL\"",
"vv",
"(",
"\"REMOTE_MODULE %s %s\"",
"%",
"(",
"module_name",
",",
"module_args",
")",
",",
"host",
"=",
"conn",
".",
"host",
")",
"return",
"self",
".",
"runner",
".",
"_execute_module",
"(",
"conn",
",",
"tmp",
",",
"module_name",
",",
"module_args",
",",
"inject",
"=",
"inject",
")"
] | transfer & execute a module that is not 'copy' or 'template' | [
"transfer",
"&",
"execute",
"a",
"module",
"that",
"is",
"not",
"copy",
"or",
"template"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/action_plugins/normal.py#L36-L45 |
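The shell-to-command aliasing performed above, shown in isolation (a standalone sketch; running the real method requires the full Ansible runner):

module_name, module_args = 'shell', 'echo hi'
if module_name == 'shell':
    module_name = 'command'
    module_args += " #USE_SHELL"
print(module_name, module_args)      # command echo hi #USE_SHELL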
248,738 | PSU-OIT-ARC/elasticmodels | elasticmodels/management/commands/__init__.py | get_models | def get_models(args):
"""
Parse a list of ModelName, appname or appname.ModelName list, and return
    the list of model classes in the IndexRegistry. If the list is falsy,
return all the models in the registry.
"""
if args:
models = []
for arg in args:
match_found = False
for model in registry.get_models():
if model._meta.app_label == arg:
models.append(model)
match_found = True
elif '%s.%s' % (model._meta.app_label, model._meta.model_name) == arg:
models.append(model)
match_found = True
if not match_found:
raise ValueError("No model or app named %s" % arg)
else:
models = registry.get_models()
return set(models) | python | def get_models(args):
"""
Parse a list of ModelName, appname or appname.ModelName list, and return
    the list of model classes in the IndexRegistry. If the list is falsy,
return all the models in the registry.
"""
if args:
models = []
for arg in args:
match_found = False
for model in registry.get_models():
if model._meta.app_label == arg:
models.append(model)
match_found = True
elif '%s.%s' % (model._meta.app_label, model._meta.model_name) == arg:
models.append(model)
match_found = True
if not match_found:
raise ValueError("No model or app named %s" % arg)
else:
models = registry.get_models()
return set(models) | [
"def",
"get_models",
"(",
"args",
")",
":",
"if",
"args",
":",
"models",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"match_found",
"=",
"False",
"for",
"model",
"in",
"registry",
".",
"get_models",
"(",
")",
":",
"if",
"model",
".",
"_meta",
".",
"app_label",
"==",
"arg",
":",
"models",
".",
"append",
"(",
"model",
")",
"match_found",
"=",
"True",
"elif",
"'%s.%s'",
"%",
"(",
"model",
".",
"_meta",
".",
"app_label",
",",
"model",
".",
"_meta",
".",
"model_name",
")",
"==",
"arg",
":",
"models",
".",
"append",
"(",
"model",
")",
"match_found",
"=",
"True",
"if",
"not",
"match_found",
":",
"raise",
"ValueError",
"(",
"\"No model or app named %s\"",
"%",
"arg",
")",
"else",
":",
"models",
"=",
"registry",
".",
"get_models",
"(",
")",
"return",
"set",
"(",
"models",
")"
] | Parse a list of ModelName, appname or appname.ModelName list, and return
the list of model classes in the IndexRegistry. If the list is falsy,
return all the models in the registry. | [
"Parse",
"a",
"list",
"of",
"ModelName",
"appname",
"or",
"appname",
".",
"ModelName",
"list",
"and",
"return",
"the",
"list",
"of",
"model",
"classes",
"in",
"the",
"IndexRegistry",
".",
"If",
"the",
"list",
"if",
"falsy",
"return",
"all",
"the",
"models",
"in",
"the",
"registry",
"."
] | 67870508096f66123ef10b89789bbac06571cc80 | https://github.com/PSU-OIT-ARC/elasticmodels/blob/67870508096f66123ef10b89789bbac06571cc80/elasticmodels/management/commands/__init__.py#L3-L26 |
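A hedged sketch; it assumes a configured Django project with a populated index registry, and the app/model names are hypothetical:

from elasticmodels.management.commands import get_models

some = get_models(['myapp', 'otherapp.MyModel'])   # names are hypothetical
everything = get_models([])                        # falsy -> all registered models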
248,739 | ryanjdillon/pyotelem | pyotelem/plots/plotdives.py | plot_dives | def plot_dives(dv0, dv1, p, dp, t_on, t_off):
'''Plots depths and delta depths with dive start stop markers
Args
----
dv0: int
Index position of dive start in cue array
dv1: int
Index position of dive stop in cue array
p: ndarray
Depth values
dp: ndarray
Delta depths
t_on: ndarray
Cue array with start index position of dives
t_off: ndarray
        Cue array with stop index position of dives
'''
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
x0 = t_on[dv0:dv1] - t_on[dv0]
x1 = t_off[dv0:dv1] - t_on[dv0]
# Extract start end depths
y0_p = p[t_on[dv0:dv1]]
y1_p = p[t_off[dv0:dv1]]
# Extract start end delta depths
y0_dp = dp[t_on[dv0:dv1]]
y1_dp = dp[t_off[dv0:dv1]]
start = t_on[dv0]
stop = t_off[dv1]
ax1.title.set_text('Dives depths')
ax1.plot(range(len(p[start:stop])), p[start:stop])
ax1.scatter(x0, y0_p, label='start')
ax1.scatter(x1, y1_p, label='stop')
ax1.set_ylabel('depth (m)')
    ax2.title.set_text('Depth rate of change')
ax2.plot(range(len(dp[start:stop])), dp[start:stop])
ax2.scatter(x0, y0_dp, label='start')
ax2.scatter(x1, y1_dp, label='stop')
ax2.set_ylabel('depth (dm/t)')
ax2.set_xlabel('sample')
for ax in [ax1, ax2]:
ax.legend(loc='upper right')
ax.set_xlim([-50, len(dp[start:stop])+50])
plt.show()
return None | python | def plot_dives(dv0, dv1, p, dp, t_on, t_off):
'''Plots depths and delta depths with dive start stop markers
Args
----
dv0: int
Index position of dive start in cue array
dv1: int
Index position of dive stop in cue array
p: ndarray
Depth values
dp: ndarray
Delta depths
t_on: ndarray
Cue array with start index position of dives
t_off: ndarray
        Cue array with stop index position of dives
'''
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
x0 = t_on[dv0:dv1] - t_on[dv0]
x1 = t_off[dv0:dv1] - t_on[dv0]
# Extract start end depths
y0_p = p[t_on[dv0:dv1]]
y1_p = p[t_off[dv0:dv1]]
# Extract start end delta depths
y0_dp = dp[t_on[dv0:dv1]]
y1_dp = dp[t_off[dv0:dv1]]
start = t_on[dv0]
stop = t_off[dv1]
ax1.title.set_text('Dives depths')
ax1.plot(range(len(p[start:stop])), p[start:stop])
ax1.scatter(x0, y0_p, label='start')
ax1.scatter(x1, y1_p, label='stop')
ax1.set_ylabel('depth (m)')
    ax2.title.set_text('Depth rate of change')
ax2.plot(range(len(dp[start:stop])), dp[start:stop])
ax2.scatter(x0, y0_dp, label='start')
ax2.scatter(x1, y1_dp, label='stop')
ax2.set_ylabel('depth (dm/t)')
ax2.set_xlabel('sample')
for ax in [ax1, ax2]:
ax.legend(loc='upper right')
ax.set_xlim([-50, len(dp[start:stop])+50])
plt.show()
return None | [
"def",
"plot_dives",
"(",
"dv0",
",",
"dv1",
",",
"p",
",",
"dp",
",",
"t_on",
",",
"t_off",
")",
":",
"fig",
",",
"(",
"ax1",
",",
"ax2",
")",
"=",
"plt",
".",
"subplots",
"(",
"2",
",",
"1",
",",
"sharex",
"=",
"True",
")",
"x0",
"=",
"t_on",
"[",
"dv0",
":",
"dv1",
"]",
"-",
"t_on",
"[",
"dv0",
"]",
"x1",
"=",
"t_off",
"[",
"dv0",
":",
"dv1",
"]",
"-",
"t_on",
"[",
"dv0",
"]",
"# Extract start end depths",
"y0_p",
"=",
"p",
"[",
"t_on",
"[",
"dv0",
":",
"dv1",
"]",
"]",
"y1_p",
"=",
"p",
"[",
"t_off",
"[",
"dv0",
":",
"dv1",
"]",
"]",
"# Extract start end delta depths",
"y0_dp",
"=",
"dp",
"[",
"t_on",
"[",
"dv0",
":",
"dv1",
"]",
"]",
"y1_dp",
"=",
"dp",
"[",
"t_off",
"[",
"dv0",
":",
"dv1",
"]",
"]",
"start",
"=",
"t_on",
"[",
"dv0",
"]",
"stop",
"=",
"t_off",
"[",
"dv1",
"]",
"ax1",
".",
"title",
".",
"set_text",
"(",
"'Dives depths'",
")",
"ax1",
".",
"plot",
"(",
"range",
"(",
"len",
"(",
"p",
"[",
"start",
":",
"stop",
"]",
")",
")",
",",
"p",
"[",
"start",
":",
"stop",
"]",
")",
"ax1",
".",
"scatter",
"(",
"x0",
",",
"y0_p",
",",
"label",
"=",
"'start'",
")",
"ax1",
".",
"scatter",
"(",
"x1",
",",
"y1_p",
",",
"label",
"=",
"'stop'",
")",
"ax1",
".",
"set_ylabel",
"(",
"'depth (m)'",
")",
"ax1",
".",
"title",
".",
"set_text",
"(",
"'Depth rate of change'",
")",
"ax2",
".",
"plot",
"(",
"range",
"(",
"len",
"(",
"dp",
"[",
"start",
":",
"stop",
"]",
")",
")",
",",
"dp",
"[",
"start",
":",
"stop",
"]",
")",
"ax2",
".",
"scatter",
"(",
"x0",
",",
"y0_dp",
",",
"label",
"=",
"'start'",
")",
"ax2",
".",
"scatter",
"(",
"x1",
",",
"y1_dp",
",",
"label",
"=",
"'stop'",
")",
"ax2",
".",
"set_ylabel",
"(",
"'depth (dm/t)'",
")",
"ax2",
".",
"set_xlabel",
"(",
"'sample'",
")",
"for",
"ax",
"in",
"[",
"ax1",
",",
"ax2",
"]",
":",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"ax",
".",
"set_xlim",
"(",
"[",
"-",
"50",
",",
"len",
"(",
"dp",
"[",
"start",
":",
"stop",
"]",
")",
"+",
"50",
"]",
")",
"plt",
".",
"show",
"(",
")",
"return",
"None"
] | Plots depths and delta depths with dive start stop markers
Args
----
dv0: int
Index position of dive start in cue array
dv1: int
Index position of dive stop in cue array
p: ndarray
Depth values
dp: ndarray
Delta depths
t_on: ndarray
Cue array with start index position of dives
t_off: ndarray
Cue array with stop index position of dives | [
"Plots",
"depths",
"and",
"delta",
"depths",
"with",
"dive",
"start",
"stop",
"markers"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotdives.py#L9-L63 |
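Usage sketch with synthetic data (two fabricated dives, so the cue indices below are constructed by hand):

import numpy
from pyotelem.plots.plotdives import plot_dives

dive = numpy.sin(numpy.linspace(0, numpy.pi, 100)) * 10
p = numpy.concatenate([dive, numpy.zeros(20), dive])   # two dives, surface gap
dp = numpy.gradient(p)
t_on = numpy.array([0, 120])         # dive start indices
t_off = numpy.array([99, 219])       # dive stop indices
plot_dives(0, 1, p, dp, t_on, t_off)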
248,740 | ryanjdillon/pyotelem | pyotelem/plots/plotdives.py | plot_dives_pitch | def plot_dives_pitch(depths, dive_mask, des, asc, pitch, pitch_lf):
'''Plot dives with phase and associated pitch angle with HF signal
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
        boolean mask for slicing descent phases of dives from tag data
    asc: ndarray
        boolean mask for slicing ascent phases of dives from tag data
    pitch: ndarray
        Pitch angle derived from accelerometer data
pitch_lf: ndarray
Low-pass filtered derived pitch angle data
'''
import copy
import numpy
from . import plotutils
fig, (ax1, ax2) = plt.subplots(2,1, sharex=True)
des_ind = numpy.where(dive_mask & des)[0]
asc_ind = numpy.where(dive_mask & asc)[0]
ax1.title.set_text('Dive descents and ascents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0],
'descents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1],
'ascents')
ax1.legend(loc='upper right')
ax1.invert_yaxis()
ax1.yaxis.label.set_text('depth (m)')
ax1.xaxis.label.set_text('samples')
ax2.title.set_text('Pitch and Low-pass filtered pitch')
ax2.plot(range(len(pitch)), pitch, color=_colors[2], linewidth=_linewidth,
label='pitch')
ax2.plot(range(len(pitch_lf)), pitch_lf, color=_colors[3],
linewidth=_linewidth, label='pitch filtered')
ax2.legend(loc='upper right')
ax2.yaxis.label.set_text('Radians')
ax2.yaxis.label.set_text('Samples')
plt.show()
return None | python | def plot_dives_pitch(depths, dive_mask, des, asc, pitch, pitch_lf):
'''Plot dives with phase and associated pitch angle with HF signal
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
        boolean mask for slicing descent phases of dives from tag data
    asc: ndarray
        boolean mask for slicing ascent phases of dives from tag data
    pitch: ndarray
        Pitch angle derived from accelerometer data
pitch_lf: ndarray
Low-pass filtered derived pitch angle data
'''
import copy
import numpy
from . import plotutils
fig, (ax1, ax2) = plt.subplots(2,1, sharex=True)
des_ind = numpy.where(dive_mask & des)[0]
asc_ind = numpy.where(dive_mask & asc)[0]
ax1.title.set_text('Dive descents and ascents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0],
'descents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1],
'ascents')
ax1.legend(loc='upper right')
ax1.invert_yaxis()
ax1.yaxis.label.set_text('depth (m)')
ax1.xaxis.label.set_text('samples')
ax2.title.set_text('Pitch and Low-pass filtered pitch')
ax2.plot(range(len(pitch)), pitch, color=_colors[2], linewidth=_linewidth,
label='pitch')
ax2.plot(range(len(pitch_lf)), pitch_lf, color=_colors[3],
linewidth=_linewidth, label='pitch filtered')
ax2.legend(loc='upper right')
ax2.yaxis.label.set_text('Radians')
    ax2.xaxis.label.set_text('Samples')
plt.show()
return None | [
"def",
"plot_dives_pitch",
"(",
"depths",
",",
"dive_mask",
",",
"des",
",",
"asc",
",",
"pitch",
",",
"pitch_lf",
")",
":",
"import",
"copy",
"import",
"numpy",
"from",
".",
"import",
"plotutils",
"fig",
",",
"(",
"ax1",
",",
"ax2",
")",
"=",
"plt",
".",
"subplots",
"(",
"2",
",",
"1",
",",
"sharex",
"=",
"True",
")",
"des_ind",
"=",
"numpy",
".",
"where",
"(",
"dive_mask",
"&",
"des",
")",
"[",
"0",
"]",
"asc_ind",
"=",
"numpy",
".",
"where",
"(",
"dive_mask",
"&",
"asc",
")",
"[",
"0",
"]",
"ax1",
".",
"title",
".",
"set_text",
"(",
"'Dive descents and ascents'",
")",
"ax1",
"=",
"plotutils",
".",
"plot_noncontiguous",
"(",
"ax1",
",",
"depths",
",",
"des_ind",
",",
"_colors",
"[",
"0",
"]",
",",
"'descents'",
")",
"ax1",
"=",
"plotutils",
".",
"plot_noncontiguous",
"(",
"ax1",
",",
"depths",
",",
"asc_ind",
",",
"_colors",
"[",
"1",
"]",
",",
"'ascents'",
")",
"ax1",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"ax1",
".",
"invert_yaxis",
"(",
")",
"ax1",
".",
"yaxis",
".",
"label",
".",
"set_text",
"(",
"'depth (m)'",
")",
"ax1",
".",
"xaxis",
".",
"label",
".",
"set_text",
"(",
"'samples'",
")",
"ax2",
".",
"title",
".",
"set_text",
"(",
"'Pitch and Low-pass filtered pitch'",
")",
"ax2",
".",
"plot",
"(",
"range",
"(",
"len",
"(",
"pitch",
")",
")",
",",
"pitch",
",",
"color",
"=",
"_colors",
"[",
"2",
"]",
",",
"linewidth",
"=",
"_linewidth",
",",
"label",
"=",
"'pitch'",
")",
"ax2",
".",
"plot",
"(",
"range",
"(",
"len",
"(",
"pitch_lf",
")",
")",
",",
"pitch_lf",
",",
"color",
"=",
"_colors",
"[",
"3",
"]",
",",
"linewidth",
"=",
"_linewidth",
",",
"label",
"=",
"'pitch filtered'",
")",
"ax2",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"ax2",
".",
"yaxis",
".",
"label",
".",
"set_text",
"(",
"'Radians'",
")",
"ax2",
".",
"yaxis",
".",
"label",
".",
"set_text",
"(",
"'Samples'",
")",
"plt",
".",
"show",
"(",
")",
"return",
"None"
] | Plot dives with phase and associated pitch angle with HF signal
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
boolean mask for slicing descent phases of dives from tag data
asc: ndarray
boolean mask for slicing ascent phases of dives from tag data
pitch: ndarray
Pitch angle derived from accelerometer data
pitch_lf: ndarray
Low-pass filtered derived pitch angle data | [
"Plot",
"dives",
"with",
"phase",
"and",
"associated",
"pitch",
"angle",
"with",
"HF",
"signal"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotdives.py#L66-L117 |
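Usage sketch with synthetic data; the pitch signals are random stand-ins, not real accelerometer output:

import numpy
from pyotelem.plots.plotdives import plot_dives_pitch

n = 200
depths = numpy.sin(numpy.linspace(0, numpy.pi, n)) * 10
dive_mask = depths > 1
des = numpy.gradient(depths) > 0     # descending while depth increases
asc = ~des
pitch = numpy.random.randn(n) * 0.1
pitch_lf = numpy.zeros(n)            # stand-in for a low-pass filtered signal
plot_dives_pitch(depths, dive_mask, des, asc, pitch, pitch_lf)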
248,741 | ryanjdillon/pyotelem | pyotelem/plots/plotdives.py | plot_depth_descent_ascent | def plot_depth_descent_ascent(depths, dive_mask, des, asc):
'''Plot depth data for whole deployment, descents, and ascents
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
        boolean mask for slicing descent phases of dives from tag data
    asc: ndarray
        boolean mask for slicing ascent phases of dives from tag data
'''
import numpy
from . import plotutils
# Indices where depths are descents or ascents
des_ind = numpy.where(dive_mask & des)[0]
asc_ind = numpy.where(dive_mask & asc)[0]
fig, ax1 = plt.subplots()
ax1.title.set_text('Dive descents and ascents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0],
'descents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1],
'ascents')
ax1.legend(loc='upper right')
ax1.invert_yaxis()
ax1.yaxis.label.set_text('depth (m)')
ax1.xaxis.label.set_text('samples')
plt.show()
return None | python | def plot_depth_descent_ascent(depths, dive_mask, des, asc):
'''Plot depth data for whole deployment, descents, and ascents
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
        boolean mask for slicing descent phases of dives from tag data
    asc: ndarray
        boolean mask for slicing ascent phases of dives from tag data
'''
import numpy
from . import plotutils
# Indices where depths are descents or ascents
des_ind = numpy.where(dive_mask & des)[0]
asc_ind = numpy.where(dive_mask & asc)[0]
fig, ax1 = plt.subplots()
ax1.title.set_text('Dive descents and ascents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0],
'descents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1],
'ascents')
ax1.legend(loc='upper right')
ax1.invert_yaxis()
ax1.yaxis.label.set_text('depth (m)')
ax1.xaxis.label.set_text('samples')
plt.show()
return None | [
"def",
"plot_depth_descent_ascent",
"(",
"depths",
",",
"dive_mask",
",",
"des",
",",
"asc",
")",
":",
"import",
"numpy",
"from",
".",
"import",
"plotutils",
"# Indices where depths are descents or ascents",
"des_ind",
"=",
"numpy",
".",
"where",
"(",
"dive_mask",
"&",
"des",
")",
"[",
"0",
"]",
"asc_ind",
"=",
"numpy",
".",
"where",
"(",
"dive_mask",
"&",
"asc",
")",
"[",
"0",
"]",
"fig",
",",
"ax1",
"=",
"plt",
".",
"subplots",
"(",
")",
"ax1",
".",
"title",
".",
"set_text",
"(",
"'Dive descents and ascents'",
")",
"ax1",
"=",
"plotutils",
".",
"plot_noncontiguous",
"(",
"ax1",
",",
"depths",
",",
"des_ind",
",",
"_colors",
"[",
"0",
"]",
",",
"'descents'",
")",
"ax1",
"=",
"plotutils",
".",
"plot_noncontiguous",
"(",
"ax1",
",",
"depths",
",",
"asc_ind",
",",
"_colors",
"[",
"1",
"]",
",",
"'ascents'",
")",
"ax1",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"ax1",
".",
"invert_yaxis",
"(",
")",
"ax1",
".",
"yaxis",
".",
"label",
".",
"set_text",
"(",
"'depth (m)'",
")",
"ax1",
".",
"xaxis",
".",
"label",
".",
"set_text",
"(",
"'samples'",
")",
"plt",
".",
"show",
"(",
")",
"return",
"None"
] | Plot depth data for whole deployment, descents, and ascents
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
boolean mask for slicing descent phases of dives from tag data
asc: ndarray
boolean mask for slicing ascent phases of dives from tag data | [
"Plot",
"depth",
"data",
"for",
"whole",
"deployment",
"descents",
"and",
"ascents"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotdives.py#L120-L157 |
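Usage sketch with the same kind of synthetic masks as above:

import numpy
from pyotelem.plots.plotdives import plot_depth_descent_ascent

n = 200
depths = numpy.sin(numpy.linspace(0, numpy.pi, n)) * 10
dive_mask = depths > 1
des = numpy.gradient(depths) > 0
asc = ~des
plot_depth_descent_ascent(depths, dive_mask, des, asc)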
248,742 | henrysher/kotocore | kotocore/resources.py | Resource._update_docstrings | def _update_docstrings(self):
"""
Runs through the operation methods & updates their docstrings if
necessary.
If the method has the default placeholder docstring, this will replace
it with the docstring from the underlying connection.
"""
ops = self._details.resource_data['operations']
for method_name in ops.keys():
meth = getattr(self.__class__, method_name, None)
if not meth:
continue
if meth.__doc__ != DEFAULT_DOCSTRING:
# It already has a custom docstring. Leave it alone.
continue
# Needs updating. So there's at least *something* vaguely useful
# there, use the docstring from the underlying ``Connection``
# method.
# FIXME: We need to figure out a way to make this more useful, if
# possible.
api_name = ops[method_name]['api_name']
conn_meth = getattr(self._connection, to_snake_case(api_name))
# We need to do detection here, because Py2 treats ``.__doc__``
# as a special read-only attribute. :/
if six.PY3:
meth.__doc__ = conn_meth.__doc__
else:
meth.__func__.__doc__ = conn_meth.__doc__ | python | def _update_docstrings(self):
"""
Runs through the operation methods & updates their docstrings if
necessary.
If the method has the default placeholder docstring, this will replace
it with the docstring from the underlying connection.
"""
ops = self._details.resource_data['operations']
for method_name in ops.keys():
meth = getattr(self.__class__, method_name, None)
if not meth:
continue
if meth.__doc__ != DEFAULT_DOCSTRING:
# It already has a custom docstring. Leave it alone.
continue
# Needs updating. So there's at least *something* vaguely useful
# there, use the docstring from the underlying ``Connection``
# method.
# FIXME: We need to figure out a way to make this more useful, if
# possible.
api_name = ops[method_name]['api_name']
conn_meth = getattr(self._connection, to_snake_case(api_name))
# We need to do detection here, because Py2 treats ``.__doc__``
# as a special read-only attribute. :/
if six.PY3:
meth.__doc__ = conn_meth.__doc__
else:
meth.__func__.__doc__ = conn_meth.__doc__ | [
"def",
"_update_docstrings",
"(",
"self",
")",
":",
"ops",
"=",
"self",
".",
"_details",
".",
"resource_data",
"[",
"'operations'",
"]",
"for",
"method_name",
"in",
"ops",
".",
"keys",
"(",
")",
":",
"meth",
"=",
"getattr",
"(",
"self",
".",
"__class__",
",",
"method_name",
",",
"None",
")",
"if",
"not",
"meth",
":",
"continue",
"if",
"meth",
".",
"__doc__",
"!=",
"DEFAULT_DOCSTRING",
":",
"# It already has a custom docstring. Leave it alone.",
"continue",
"# Needs updating. So there's at least *something* vaguely useful",
"# there, use the docstring from the underlying ``Connection``",
"# method.",
"# FIXME: We need to figure out a way to make this more useful, if",
"# possible.",
"api_name",
"=",
"ops",
"[",
"method_name",
"]",
"[",
"'api_name'",
"]",
"conn_meth",
"=",
"getattr",
"(",
"self",
".",
"_connection",
",",
"to_snake_case",
"(",
"api_name",
")",
")",
"# We need to do detection here, because Py2 treats ``.__doc__``",
"# as a special read-only attribute. :/",
"if",
"six",
".",
"PY3",
":",
"meth",
".",
"__doc__",
"=",
"conn_meth",
".",
"__doc__",
"else",
":",
"meth",
".",
"__func__",
".",
"__doc__",
"=",
"conn_meth",
".",
"__doc__"
] | Runs through the operation methods & updates their docstrings if
necessary.
If the method has the default placeholder docstring, this will replace
it with the docstring from the underlying connection. | [
"Runs",
"through",
"the",
"operation",
"methods",
"&",
"updates",
"their",
"docstrings",
"if",
"necessary",
"."
] | c52d2f3878b924ceabca07f61c91abcb1b230ecc | https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/resources.py#L238-L271 |
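The Py2/Py3 ``__doc__`` quirk the method works around, shown in isolation:

class C(object):
    def meth(self):
        pass

try:
    C.meth.__doc__ = 'copied docstring'             # plain function on Py3
except AttributeError:
    C.meth.__func__.__doc__ = 'copied docstring'    # unbound method on Py2
print(C.meth.__doc__)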
248,743 | henrysher/kotocore | kotocore/resources.py | Resource.build_relation | def build_relation(self, name, klass=None):
"""
Constructs a related ``Resource`` or ``Collection``.
This allows for construction of classes with information prepopulated
from what the current instance has. This enables syntax like::
bucket = Bucket(bucket='some-bucket-name')
for obj in bucket.objects.each():
print(obj.key)
:param name: The name of the relation from the ResourceJSON
:type name: string
:param klass: (Optional) An overridable class to construct. Typically
only useful if you need a custom subclass used in place of what
kotocore provides.
:type klass: class
:returns: An instantiated related object
"""
try:
rel_data = self._details.relations[name]
except KeyError:
msg = "No such relation named '{0}'.".format(name)
raise NoRelation(msg)
if klass is None:
# This is the typical case, where we're not explicitly given a
# class to build with. Hit the session & look up what we should
# be loading.
if rel_data['class_type'] == 'collection':
klass = self._details.session.get_collection(
self._details.service_name,
rel_data['class']
)
elif rel_data['class_type'] == 'resource':
klass = self._details.session.get_resource(
self._details.service_name,
rel_data['class']
)
else:
msg = "Unknown class '{0}' for '{1}'.".format(
rel_data['class_type'],
name
)
raise NoRelation(msg)
# Instantiate & return it.
kwargs = {}
# Just populating identifiers is enough for the 1-M case.
kwargs.update(self.get_identifiers())
if rel_data.get('rel_type', '1-M') == '1-1':
# FIXME: If it's not a collection, we might have some instance data
# (i.e. ``bucket``) in ``self._data`` to populate as well.
# This seems like a can of worms, so ignore for the moment.
pass
return klass(connection=self._connection, **kwargs) | python | def build_relation(self, name, klass=None):
"""
Constructs a related ``Resource`` or ``Collection``.
This allows for construction of classes with information prepopulated
from what the current instance has. This enables syntax like::
bucket = Bucket(bucket='some-bucket-name')
for obj in bucket.objects.each():
print(obj.key)
:param name: The name of the relation from the ResourceJSON
:type name: string
:param klass: (Optional) An overridable class to construct. Typically
only useful if you need a custom subclass used in place of what
kotocore provides.
:type klass: class
:returns: An instantiated related object
"""
try:
rel_data = self._details.relations[name]
except KeyError:
msg = "No such relation named '{0}'.".format(name)
raise NoRelation(msg)
if klass is None:
# This is the typical case, where we're not explicitly given a
# class to build with. Hit the session & look up what we should
# be loading.
if rel_data['class_type'] == 'collection':
klass = self._details.session.get_collection(
self._details.service_name,
rel_data['class']
)
elif rel_data['class_type'] == 'resource':
klass = self._details.session.get_resource(
self._details.service_name,
rel_data['class']
)
else:
msg = "Unknown class '{0}' for '{1}'.".format(
rel_data['class_type'],
name
)
raise NoRelation(msg)
# Instantiate & return it.
kwargs = {}
# Just populating identifiers is enough for the 1-M case.
kwargs.update(self.get_identifiers())
if rel_data.get('rel_type', '1-M') == '1-1':
# FIXME: If it's not a collection, we might have some instance data
# (i.e. ``bucket``) in ``self._data`` to populate as well.
# This seems like a can of worms, so ignore for the moment.
pass
return klass(connection=self._connection, **kwargs) | [
"def",
"build_relation",
"(",
"self",
",",
"name",
",",
"klass",
"=",
"None",
")",
":",
"try",
":",
"rel_data",
"=",
"self",
".",
"_details",
".",
"relations",
"[",
"name",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"\"No such relation named '{0}'.\"",
".",
"format",
"(",
"name",
")",
"raise",
"NoRelation",
"(",
"msg",
")",
"if",
"klass",
"is",
"None",
":",
"# This is the typical case, where we're not explicitly given a",
"# class to build with. Hit the session & look up what we should",
"# be loading.",
"if",
"rel_data",
"[",
"'class_type'",
"]",
"==",
"'collection'",
":",
"klass",
"=",
"self",
".",
"_details",
".",
"session",
".",
"get_collection",
"(",
"self",
".",
"_details",
".",
"service_name",
",",
"rel_data",
"[",
"'class'",
"]",
")",
"elif",
"rel_data",
"[",
"'class_type'",
"]",
"==",
"'resource'",
":",
"klass",
"=",
"self",
".",
"_details",
".",
"session",
".",
"get_resource",
"(",
"self",
".",
"_details",
".",
"service_name",
",",
"rel_data",
"[",
"'class'",
"]",
")",
"else",
":",
"msg",
"=",
"\"Unknown class '{0}' for '{1}'.\"",
".",
"format",
"(",
"rel_data",
"[",
"'class_type'",
"]",
",",
"name",
")",
"raise",
"NoRelation",
"(",
"msg",
")",
"# Instantiate & return it.",
"kwargs",
"=",
"{",
"}",
"# Just populating identifiers is enough for the 1-M case.",
"kwargs",
".",
"update",
"(",
"self",
".",
"get_identifiers",
"(",
")",
")",
"if",
"rel_data",
".",
"get",
"(",
"'rel_type'",
",",
"'1-M'",
")",
"==",
"'1-1'",
":",
"# FIXME: If it's not a collection, we might have some instance data",
"# (i.e. ``bucket``) in ``self._data`` to populate as well.",
"# This seems like a can of worms, so ignore for the moment.",
"pass",
"return",
"klass",
"(",
"connection",
"=",
"self",
".",
"_connection",
",",
"*",
"*",
"kwargs",
")"
] | Constructs a related ``Resource`` or ``Collection``.
This allows for construction of classes with information prepopulated
from what the current instance has. This enables syntax like::
bucket = Bucket(bucket='some-bucket-name')
for obj in bucket.objects.each():
print(obj.key)
:param name: The name of the relation from the ResourceJSON
:type name: string
:param klass: (Optional) An overridable class to construct. Typically
only useful if you need a custom subclass used in place of what
kotocore provides.
:type klass: class
:returns: An instantiated related object | [
"Constructs",
"a",
"related",
"Resource",
"or",
"Collection",
"."
] | c52d2f3878b924ceabca07f61c91abcb1b230ecc | https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/resources.py#L312-L372 |
248,744 | henrysher/kotocore | kotocore/resources.py | Resource.post_process_get | def post_process_get(self, result):
"""
Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data
"""
if not hasattr(result, 'items'):
# If it's not a dict, give up & just return whatever you get.
return result
# We need to possibly drill into the response & get out the data here.
# Check for a result key.
result_key = self._details.result_key_for('get')
if not result_key:
# There's no result_key. Just use the top-level data.
data = result
else:
data = result[result_key]
for key, value in data.items():
self._data[to_snake_case(key)] = value
return result | python | def post_process_get(self, result):
"""
Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data
"""
if not hasattr(result, 'items'):
# If it's not a dict, give up & just return whatever you get.
return result
# We need to possibly drill into the response & get out the data here.
# Check for a result key.
result_key = self._details.result_key_for('get')
if not result_key:
# There's no result_key. Just use the top-level data.
data = result
else:
data = result[result_key]
for key, value in data.items():
self._data[to_snake_case(key)] = value
return result | [
"def",
"post_process_get",
"(",
"self",
",",
"result",
")",
":",
"if",
"not",
"hasattr",
"(",
"result",
",",
"'items'",
")",
":",
"# If it's not a dict, give up & just return whatever you get.",
"return",
"result",
"# We need to possibly drill into the response & get out the data here.",
"# Check for a result key.",
"result_key",
"=",
"self",
".",
"_details",
".",
"result_key_for",
"(",
"'get'",
")",
"if",
"not",
"result_key",
":",
"# There's no result_key. Just use the top-level data.",
"data",
"=",
"result",
"else",
":",
"data",
"=",
"result",
"[",
"result_key",
"]",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"self",
".",
"_data",
"[",
"to_snake_case",
"(",
"key",
")",
"]",
"=",
"value",
"return",
"result"
] | Given an object with identifiers, fetches the data for that object
from the service.
This alters the data on the object itself & simply passes through what
was received.
:param result: The response data
:type result: dict
:returns: The unmodified response data | [
"Given",
"an",
"object",
"with",
"identifiers",
"fetches",
"the",
"data",
"for",
"that",
"object",
"from",
"the",
"service",
"."
] | c52d2f3878b924ceabca07f61c91abcb1b230ecc | https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/resources.py#L489-L519 |
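A sketch of how post_process_get folds a response into the resource's _data; the result_key 'QueueData' and the response keys are placeholders, and 'queue' is assumed to be an already-built Resource instance:
result = {'QueueData': {'QueueUrl': 'http://...', 'VisibilityTimeout': 30}}
queue.post_process_get(result)
# queue._data now holds the snake_cased keys:
# {'queue_url': 'http://...', 'visibility_timeout': 30}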
248,745 | henrysher/kotocore | kotocore/resources.py | ResourceFactory.construct_for | def construct_for(self, service_name, resource_name, base_class=None):
"""
Builds a new, specialized ``Resource`` subclass as part of a given
service.
This will load the ``ResourceJSON``, determine the correct
mappings/methods & constructs a brand new class with those methods on
it.
:param service_name: The name of the service to construct a resource
for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: The name of the ``Resource``. Ex.
``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:returns: A new resource class for that service
"""
details = self.details_class(
self.session,
service_name,
resource_name,
loader=self.loader
)
attrs = {
'_details': details,
}
# Determine what we should call it.
klass_name = self._build_class_name(resource_name)
# Construct what the class ought to have on it.
attrs.update(self._build_methods(details))
if base_class is None:
base_class = self.base_resource_class
# Create the class.
return type(
klass_name,
(base_class,),
attrs
) | python | def construct_for(self, service_name, resource_name, base_class=None):
"""
Builds a new, specialized ``Resource`` subclass as part of a given
service.
This will load the ``ResourceJSON``, determine the correct
mappings/methods & constructs a brand new class with those methods on
it.
:param service_name: The name of the service to construct a resource
for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: The name of the ``Resource``. Ex.
``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:returns: A new resource class for that service
"""
details = self.details_class(
self.session,
service_name,
resource_name,
loader=self.loader
)
attrs = {
'_details': details,
}
# Determine what we should call it.
klass_name = self._build_class_name(resource_name)
# Construct what the class ought to have on it.
attrs.update(self._build_methods(details))
if base_class is None:
base_class = self.base_resource_class
# Create the class.
return type(
klass_name,
(base_class,),
attrs
) | [
"def",
"construct_for",
"(",
"self",
",",
"service_name",
",",
"resource_name",
",",
"base_class",
"=",
"None",
")",
":",
"details",
"=",
"self",
".",
"details_class",
"(",
"self",
".",
"session",
",",
"service_name",
",",
"resource_name",
",",
"loader",
"=",
"self",
".",
"loader",
")",
"attrs",
"=",
"{",
"'_details'",
":",
"details",
",",
"}",
"# Determine what we should call it.",
"klass_name",
"=",
"self",
".",
"_build_class_name",
"(",
"resource_name",
")",
"# Construct what the class ought to have on it.",
"attrs",
".",
"update",
"(",
"self",
".",
"_build_methods",
"(",
"details",
")",
")",
"if",
"base_class",
"is",
"None",
":",
"base_class",
"=",
"self",
".",
"base_resource_class",
"# Create the class.",
"return",
"type",
"(",
"klass_name",
",",
"(",
"base_class",
",",
")",
",",
"attrs",
")"
] | Builds a new, specialized ``Resource`` subclass as part of a given
service.
This will load the ``ResourceJSON``, determine the correct
mappings/methods & constructs a brand new class with those methods on
it.
:param service_name: The name of the service to construct a resource
for. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: The name of the ``Resource``. Ex.
``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:returns: A new resource class for that service | [
"Builds",
"a",
"new",
"specialized",
"Resource",
"subclass",
"as",
"part",
"of",
"a",
"given",
"service",
"."
] | c52d2f3878b924ceabca07f61c91abcb1b230ecc | https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/resources.py#L580-L624 |
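A usage sketch for the factory, reusing the docstring's own 'sqs'/'Queue' examples; the constructor arguments and the pre-existing 'session' are assumptions:
factory = ResourceFactory(session=session)
Queue = factory.construct_for('sqs', 'Queue')
# Queue is a brand-new subclass of the base resource class with
# '_details' and the generated methods attached; instantiate it with
# a connection as usual.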
248,746 | awacha/credolib | credolib/io.py | filter_headers | def filter_headers(criterion):
"""Filter already loaded headers against some criterion.
The criterion function must accept a single argument, which is an instance
of sastool.classes2.header.Header, or one of its subclasses. The function
must return True if the header is to be kept or False if it needs to be
discarded. All manipulations on the header (including sample name changes,
etc.) carried out by this function are preserved.
"""
ip = get_ipython()
for headerkind in ['processed', 'raw']:
for h in ip.user_ns['_headers'][headerkind][:]:
if not criterion(h):
ip.user_ns['_headers'][headerkind].remove(h)
ip.user_ns['allsamplenames'] = {h.title for h in ip.user_ns['_headers']['processed']} | python | def filter_headers(criterion):
"""Filter already loaded headers against some criterion.
The criterion function must accept a single argument, which is an instance
of sastool.classes2.header.Header, or one of its subclasses. The function
must return True if the header is to be kept or False if it needs to be
discarded. All manipulations on the header (including sample name changes,
etc.) carried out by this function are preserved.
"""
ip = get_ipython()
for headerkind in ['processed', 'raw']:
for h in ip.user_ns['_headers'][headerkind][:]:
if not criterion(h):
ip.user_ns['_headers'][headerkind].remove(h)
ip.user_ns['allsamplenames'] = {h.title for h in ip.user_ns['_headers']['processed']} | [
"def",
"filter_headers",
"(",
"criterion",
")",
":",
"ip",
"=",
"get_ipython",
"(",
")",
"for",
"headerkind",
"in",
"[",
"'processed'",
",",
"'raw'",
"]",
":",
"for",
"h",
"in",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"[",
"headerkind",
"]",
"[",
":",
"]",
":",
"if",
"not",
"criterion",
"(",
"h",
")",
":",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"[",
"headerkind",
"]",
".",
"remove",
"(",
"h",
")",
"ip",
".",
"user_ns",
"[",
"'allsamplenames'",
"]",
"=",
"{",
"h",
".",
"title",
"for",
"h",
"in",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"[",
"'processed'",
"]",
"}"
] | Filter already loaded headers against some criterion.
The criterion function must accept a single argument, which is an instance
of sastool.classes2.header.Header, or one of its subclasses. The function
must return True if the header is to be kept or False if it needs to be
discarded. All manipulations on the header (including sample name changes,
etc.) carried out by this function are preserved. | [
"Filter",
"already",
"loaded",
"headers",
"against",
"some",
"criterion",
"."
] | 11c0be3eea7257d3d6e13697d3e76ce538f2f1b2 | https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/io.py#L13-L27 |
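Example criteria matching the contract above (any callable taking a Header and returning a bool works; the sample names are invented, but .title and .fsn are attributes this module itself relies on):
filter_headers(lambda h: h.title == 'my_sample')
def recent_and_matching(h):
    return h.title.startswith('AgBeh') and h.fsn > 1000
filter_headers(recent_and_matching)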
248,747 | awacha/credolib | credolib/io.py | load_headers | def load_headers(fsns:List[int]):
"""Load header files
"""
ip = get_ipython()
ip.user_ns['_headers'] = {}
for type_ in ['raw', 'processed']:
print("Loading %d headers (%s)" % (len(fsns), type_), flush=True)
processed = type_ == 'processed'
headers = []
for f in fsns:
for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed == processed]:
try:
headers.append(l.loadheader(f))
break
except FileNotFoundError:
continue
allsamplenames = {h.title for h in headers}
if not headers:
print('NO HEADERS READ FOR TYPE "%s"' % type_)
else:
print("%d headers (%s) out of %d have been loaded successfully." % (len(headers), type_, len(fsns)))
print('Read FSN range:', min([h.fsn for h in headers]), 'to', max([h.fsn for h in headers]))
print("Samples covered by these headers:")
print(" " + "\n ".join(sorted(allsamplenames)), flush=True)
if processed:
ip.user_ns['allsamplenames'] = allsamplenames
ip.user_ns['_headers'][type_] = headers | python | def load_headers(fsns:List[int]):
"""Load header files
"""
ip = get_ipython()
ip.user_ns['_headers'] = {}
for type_ in ['raw', 'processed']:
print("Loading %d headers (%s)" % (len(fsns), type_), flush=True)
processed = type_ == 'processed'
headers = []
for f in fsns:
for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed == processed]:
try:
headers.append(l.loadheader(f))
break
except FileNotFoundError:
continue
allsamplenames = {h.title for h in headers}
if not headers:
print('NO HEADERS READ FOR TYPE "%s"' % type_)
else:
print("%d headers (%s) out of %d have been loaded successfully." % (len(headers), type_, len(fsns)))
print('Read FSN range:', min([h.fsn for h in headers]), 'to', max([h.fsn for h in headers]))
print("Samples covered by these headers:")
print(" " + "\n ".join(sorted(allsamplenames)), flush=True)
if processed:
ip.user_ns['allsamplenames'] = allsamplenames
ip.user_ns['_headers'][type_] = headers | [
"def",
"load_headers",
"(",
"fsns",
":",
"List",
"[",
"int",
"]",
")",
":",
"ip",
"=",
"get_ipython",
"(",
")",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"=",
"{",
"}",
"for",
"type_",
"in",
"[",
"'raw'",
",",
"'processed'",
"]",
":",
"print",
"(",
"\"Loading %d headers (%s)\"",
"%",
"(",
"len",
"(",
"fsns",
")",
",",
"type_",
")",
",",
"flush",
"=",
"True",
")",
"processed",
"=",
"type_",
"==",
"'processed'",
"headers",
"=",
"[",
"]",
"for",
"f",
"in",
"fsns",
":",
"for",
"l",
"in",
"[",
"l_",
"for",
"l_",
"in",
"ip",
".",
"user_ns",
"[",
"'_loaders'",
"]",
"if",
"l_",
".",
"processed",
"==",
"processed",
"]",
":",
"try",
":",
"headers",
".",
"append",
"(",
"l",
".",
"loadheader",
"(",
"f",
")",
")",
"break",
"except",
"FileNotFoundError",
":",
"continue",
"allsamplenames",
"=",
"{",
"h",
".",
"title",
"for",
"h",
"in",
"headers",
"}",
"if",
"not",
"headers",
":",
"print",
"(",
"'NO HEADERS READ FOR TYPE \"%s\"'",
"%",
"type_",
")",
"else",
":",
"print",
"(",
"\"%d headers (%s) out of %d have been loaded successfully.\"",
"%",
"(",
"len",
"(",
"headers",
")",
",",
"type_",
",",
"len",
"(",
"fsns",
")",
")",
")",
"print",
"(",
"'Read FSN range:'",
",",
"min",
"(",
"[",
"h",
".",
"fsn",
"for",
"h",
"in",
"headers",
"]",
")",
",",
"'to'",
",",
"max",
"(",
"[",
"h",
".",
"fsn",
"for",
"h",
"in",
"headers",
"]",
")",
")",
"print",
"(",
"\"Samples covered by these headers:\"",
")",
"print",
"(",
"\" \"",
"+",
"\"\\n \"",
".",
"join",
"(",
"sorted",
"(",
"allsamplenames",
")",
")",
",",
"flush",
"=",
"True",
")",
"if",
"processed",
":",
"ip",
".",
"user_ns",
"[",
"'allsamplenames'",
"]",
"=",
"allsamplenames",
"ip",
".",
"user_ns",
"[",
"'_headers'",
"]",
"[",
"type_",
"]",
"=",
"headers"
] | Load header files | [
"Load",
"header",
"files"
] | 11c0be3eea7257d3d6e13697d3e76ce538f2f1b2 | https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/io.py#L29-L55 |
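A typical call, assuming the loaders have already been registered in the IPython user namespace under '_loaders' (as this module expects):
load_headers(list(range(100, 200)))  # tries FSNs 100..199 with each loader
# Afterwards ip.user_ns['_headers']['raw'] and ['processed'] hold the
# results, and ip.user_ns['allsamplenames'] covers the processed headers.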
248,748 | hph/mov | mov.py | get_size | def get_size(path):
'''Return the size of path in bytes if it exists and can be determined.'''
size = os.path.getsize(path)
for item in os.walk(path):
for file in item[2]:
size += os.path.getsize(os.path.join(item[0], file))
return size | python | def get_size(path):
'''Return the size of path in bytes if it exists and can be determined.'''
size = os.path.getsize(path)
for item in os.walk(path):
for file in item[2]:
size += os.path.getsize(os.path.join(item[0], file))
return size | [
"def",
"get_size",
"(",
"path",
")",
":",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
"for",
"item",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"file",
"in",
"item",
"[",
"2",
"]",
":",
"size",
"+=",
"os",
".",
"path",
".",
"getsize",
"(",
"os",
".",
"path",
".",
"join",
"(",
"item",
"[",
"0",
"]",
",",
"file",
")",
")",
"return",
"size"
] | Return the size of path in bytes if it exists and can be determined. | [
"Return",
"the",
"size",
"of",
"path",
"in",
"bytes",
"if",
"it",
"exists",
"and",
"can",
"be",
"determined",
"."
] | 36a18d92836e1aff74ca02e16ce09d1c46e111b9 | https://github.com/hph/mov/blob/36a18d92836e1aff74ca02e16ce09d1c46e111b9/mov.py#L52-L58 |
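A quick usage sketch; the path is a placeholder and the division just converts the byte count for display:
nbytes = get_size('/media/films/Some Film (1999)')
print('{0:.1f} MiB'.format(nbytes / 2.0 ** 20))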
248,749 | hph/mov | mov.py | local_data | def local_data(path):
"""Return tuples of names, directories, total sizes and files. Each
directory represents a single film and the files are the files contained
in the directory, such as video, audio and subtitle files."""
dirs = [os.path.join(path, item) for item in os.listdir(path)]
names, sizes, files = zip(*[(dir.split('/')[-1], str(get_size(dir)),
'##'.join([file for file in os.listdir(dir)]))
for dir in dirs])
return zip(names, dirs, sizes, files) | python | def local_data(path):
"""Return tuples of names, directories, total sizes and files. Each
directory represents a single film and the files are the files contained
in the directory, such as video, audio and subtitle files."""
dirs = [os.path.join(path, item) for item in os.listdir(path)]
names, sizes, files = zip(*[(dir.split('/')[-1], str(get_size(dir)),
'##'.join([file for file in os.listdir(dir)]))
for dir in dirs])
return zip(names, dirs, sizes, files) | [
"def",
"local_data",
"(",
"path",
")",
":",
"dirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"item",
")",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
"]",
"names",
",",
"sizes",
",",
"files",
"=",
"zip",
"(",
"*",
"[",
"(",
"dir",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
",",
"str",
"(",
"get_size",
"(",
"dir",
")",
")",
",",
"'##'",
".",
"join",
"(",
"[",
"file",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"dir",
")",
"]",
")",
")",
"for",
"dir",
"in",
"dirs",
"]",
")",
"return",
"zip",
"(",
"names",
",",
"dirs",
",",
"sizes",
",",
"files",
")"
] | Return tuples of names, directories, total sizes and files. Each
directory represents a single film and the files are the files contained
in the directory, such as video, audio and subtitle files. | [
"Return",
"tuples",
"of",
"names",
"directories",
"total",
"sizes",
"and",
"files",
".",
"Each",
"directory",
"represents",
"a",
"single",
"film",
"and",
"the",
"files",
"are",
"the",
"files",
"contained",
"in",
"the",
"directory",
"such",
"as",
"video",
"audio",
"and",
"subtitle",
"files",
"."
] | 36a18d92836e1aff74ca02e16ce09d1c46e111b9 | https://github.com/hph/mov/blob/36a18d92836e1aff74ca02e16ce09d1c46e111b9/mov.py#L61-L69 |
248,750 | hph/mov | mov.py | create | def create():
"""Create a new database with information about the films in the specified
directory or directories."""
if not all(map(os.path.isdir, ARGS.directory)):
exit('Error: One or more of the specified directories does not exist.')
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
cursor.execute('DROP TABLE IF EXISTS Movies')
cursor.execute('''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT,
files BLOB)''')
for dir in ARGS.directory:
cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)',
local_data(dir)) | python | def create():
"""Create a new database with information about the films in the specified
directory or directories."""
if not all(map(os.path.isdir, ARGS.directory)):
exit('Error: One or more of the specified directories does not exist.')
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
cursor.execute('DROP TABLE IF EXISTS Movies')
cursor.execute('''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT,
files BLOB)''')
for dir in ARGS.directory:
cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)',
local_data(dir)) | [
"def",
"create",
"(",
")",
":",
"if",
"not",
"all",
"(",
"map",
"(",
"os",
".",
"path",
".",
"isdir",
",",
"ARGS",
".",
"directory",
")",
")",
":",
"exit",
"(",
"'Error: One or more of the specified directories does not exist.'",
")",
"with",
"sqlite3",
".",
"connect",
"(",
"ARGS",
".",
"database",
")",
"as",
"connection",
":",
"connection",
".",
"text_factory",
"=",
"str",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'DROP TABLE IF EXISTS Movies'",
")",
"cursor",
".",
"execute",
"(",
"'''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT,\n files BLOB)'''",
")",
"for",
"dir",
"in",
"ARGS",
".",
"directory",
":",
"cursor",
".",
"executemany",
"(",
"'INSERT INTO Movies VALUES(?, ?, ?, ?)'",
",",
"local_data",
"(",
"dir",
")",
")"
] | Create a new database with information about the films in the specified
directory or directories. | [
"Create",
"a",
"new",
"database",
"with",
"information",
"about",
"the",
"films",
"in",
"the",
"specified",
"directory",
"or",
"directories",
"."
] | 36a18d92836e1aff74ca02e16ce09d1c46e111b9 | https://github.com/hph/mov/blob/36a18d92836e1aff74ca02e16ce09d1c46e111b9/mov.py#L88-L101 |
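A sketch of reading back the table that create() populates; 'movies.db' stands in for whatever path ARGS.database points at:
import sqlite3
with sqlite3.connect('movies.db') as connection:
    cursor = connection.cursor()
    # Columns are name, path, size, files; size is stored as TEXT.
    cursor.execute('SELECT name, size FROM Movies ORDER BY name')
    rows = cursor.fetchall()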
248,751 | hph/mov | mov.py | ls | def ls():
"""List all items in the database in a predefined format."""
if not os.path.exists(ARGS.database):
exit('Error: The database does not exist; you must create it first.')
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
if ARGS.pattern:
if not ARGS.strict:
ARGS.pattern = '%{0}%'.format(ARGS.pattern)
cursor.execute('SELECT * FROM Movies WHERE Name LIKE (?)',
[ARGS.pattern])
else:
cursor.execute('SELECT * FROM Movies')
movies = sorted([row for row in cursor])
if ARGS.name:
print '\n'.join([movie[0] for movie in movies])
elif ARGS.location:
print '\n'.join([movie[1] for movie in movies])
elif ARGS.size:
print '\n'.join([prefix_size(int(movie[2])) for movie in movies])
elif ARGS.files:
for movie in movies:
print ', '.join(movie[3].split('##'))
else:
for i, movie in enumerate(movies):
print 'Name:\t\t{0}'.format(movie[0])
print 'Location:\t{0}'.format(movie[1])
print 'Size:\t\t{0}'.format(prefix_size(int(movie[2])))
print 'Files:\t\t{0}'.format(', '.join(movie[3].split('##')))
if not i == len(movies) - 1:
print | python | def ls():
"""List all items in the database in a predefined format."""
if not os.path.exists(ARGS.database):
exit('Error: The database does not exist; you must create it first.')
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
if ARGS.pattern:
if not ARGS.strict:
ARGS.pattern = '%{0}%'.format(ARGS.pattern)
cursor.execute('SELECT * FROM Movies WHERE Name LIKE (?)',
[ARGS.pattern])
else:
cursor.execute('SELECT * FROM Movies')
movies = sorted([row for row in cursor])
if ARGS.name:
print '\n'.join([movie[0] for movie in movies])
elif ARGS.location:
print '\n'.join([movie[1] for movie in movies])
elif ARGS.size:
print '\n'.join([prefix_size(int(movie[2])) for movie in movies])
elif ARGS.files:
for movie in movies:
print ', '.join(movie[3].split('##'))
else:
for i, movie in enumerate(movies):
print 'Name:\t\t{0}'.format(movie[0])
print 'Location:\t{0}'.format(movie[1])
print 'Size:\t\t{0}'.format(prefix_size(int(movie[2])))
print 'Files:\t\t{0}'.format(', '.join(movie[3].split('##')))
if not i == len(movies) - 1:
print | [
"def",
"ls",
"(",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"ARGS",
".",
"database",
")",
":",
"exit",
"(",
"'Error: The database does not exist; you must create it first.'",
")",
"with",
"sqlite3",
".",
"connect",
"(",
"ARGS",
".",
"database",
")",
"as",
"connection",
":",
"connection",
".",
"text_factory",
"=",
"str",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"if",
"ARGS",
".",
"pattern",
":",
"if",
"not",
"ARGS",
".",
"strict",
":",
"ARGS",
".",
"pattern",
"=",
"'%{0}%'",
".",
"format",
"(",
"ARGS",
".",
"pattern",
")",
"cursor",
".",
"execute",
"(",
"'SELECT * FROM Movies WHERE Name LIKE (?)'",
",",
"[",
"ARGS",
".",
"pattern",
"]",
")",
"else",
":",
"cursor",
".",
"execute",
"(",
"'SELECT * FROM Movies'",
")",
"movies",
"=",
"sorted",
"(",
"[",
"row",
"for",
"row",
"in",
"cursor",
"]",
")",
"if",
"ARGS",
".",
"name",
":",
"print",
"'\\n'",
".",
"join",
"(",
"[",
"movie",
"[",
"0",
"]",
"for",
"movie",
"in",
"movies",
"]",
")",
"elif",
"ARGS",
".",
"location",
":",
"print",
"'\\n'",
".",
"join",
"(",
"[",
"movie",
"[",
"1",
"]",
"for",
"movie",
"in",
"movies",
"]",
")",
"elif",
"ARGS",
".",
"size",
":",
"print",
"'\\n'",
".",
"join",
"(",
"[",
"prefix_size",
"(",
"int",
"(",
"movie",
"[",
"2",
"]",
")",
")",
"for",
"movie",
"in",
"movies",
"]",
")",
"elif",
"ARGS",
".",
"files",
":",
"for",
"movie",
"in",
"movies",
":",
"print",
"', '",
".",
"join",
"(",
"movie",
"[",
"3",
"]",
".",
"split",
"(",
"'##'",
")",
")",
"else",
":",
"for",
"i",
",",
"movie",
"in",
"enumerate",
"(",
"movies",
")",
":",
"print",
"'Name:\\t\\t{0}'",
".",
"format",
"(",
"movie",
"[",
"0",
"]",
")",
"print",
"'Location:\\t{0}'",
".",
"format",
"(",
"movie",
"[",
"1",
"]",
")",
"print",
"'Size:\\t\\t{0}'",
".",
"format",
"(",
"prefix_size",
"(",
"int",
"(",
"movie",
"[",
"2",
"]",
")",
")",
")",
"print",
"'Files:\\t\\t{0}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"movie",
"[",
"3",
"]",
".",
"split",
"(",
"'##'",
")",
")",
")",
"if",
"not",
"i",
"==",
"len",
"(",
"movies",
")",
"-",
"1",
":",
"print"
] | List all items in the database in a predefined format. | [
"List",
"all",
"items",
"in",
"the",
"database",
"in",
"a",
"predefined",
"format",
"."
] | 36a18d92836e1aff74ca02e16ce09d1c46e111b9 | https://github.com/hph/mov/blob/36a18d92836e1aff74ca02e16ce09d1c46e111b9/mov.py#L121-L152 |
248,752 | hph/mov | mov.py | play | def play():
"""Open the matched movie with a media player."""
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
if ARGS.pattern:
if not ARGS.strict:
ARGS.pattern = '%{0}%'.format(ARGS.pattern)
cursor.execute('SELECT * FROM Movies WHERE Name LIKE (?)',
[ARGS.pattern])
try:
path = sorted([row for row in cursor])[0][1]
replace_map = {' ': '\\ ', '"': '\\"', "'": "\\'"}
for key, val in replace_map.iteritems():
path = path.replace(key, val)
os.system('{0} {1} &'.format(ARGS.player, path))
except IndexError:
exit('Error: Movie not found.') | python | def play():
"""Open the matched movie with a media player."""
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
if ARGS.pattern:
if not ARGS.strict:
ARGS.pattern = '%{0}%'.format(ARGS.pattern)
cursor.execute('SELECT * FROM Movies WHERE Name LIKE (?)',
[ARGS.pattern])
try:
path = sorted([row for row in cursor])[0][1]
replace_map = {' ': '\\ ', '"': '\\"', "'": "\\'"}
for key, val in replace_map.iteritems():
path = path.replace(key, val)
os.system('{0} {1} &'.format(ARGS.player, path))
except IndexError:
exit('Error: Movie not found.') | [
"def",
"play",
"(",
")",
":",
"with",
"sqlite3",
".",
"connect",
"(",
"ARGS",
".",
"database",
")",
"as",
"connection",
":",
"connection",
".",
"text_factory",
"=",
"str",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"if",
"ARGS",
".",
"pattern",
":",
"if",
"not",
"ARGS",
".",
"strict",
":",
"ARGS",
".",
"pattern",
"=",
"'%{0}%'",
".",
"format",
"(",
"ARGS",
".",
"pattern",
")",
"cursor",
".",
"execute",
"(",
"'SELECT * FROM Movies WHERE Name LIKE (?)'",
",",
"[",
"ARGS",
".",
"pattern",
"]",
")",
"try",
":",
"path",
"=",
"sorted",
"(",
"[",
"row",
"for",
"row",
"in",
"cursor",
"]",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"replace_map",
"=",
"{",
"' '",
":",
"'\\\\ '",
",",
"'\"'",
":",
"'\\\\\"'",
",",
"\"'\"",
":",
"\"\\\\'\"",
"}",
"for",
"key",
",",
"val",
"in",
"replace_map",
".",
"iteritems",
"(",
")",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"key",
",",
"val",
")",
"os",
".",
"system",
"(",
"'{0} {1} &'",
".",
"format",
"(",
"ARGS",
".",
"player",
",",
"path",
")",
")",
"except",
"IndexError",
":",
"exit",
"(",
"'Error: Movie not found.'",
")"
] | Open the matched movie with a media player. | [
"Open",
"the",
"matched",
"movie",
"with",
"a",
"media",
"player",
"."
] | 36a18d92836e1aff74ca02e16ce09d1c46e111b9 | https://github.com/hph/mov/blob/36a18d92836e1aff74ca02e16ce09d1c46e111b9/mov.py#L155-L172 |
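The replace_map above escapes spaces and quotes by hand; a sketch of the same step using the standard library instead, quoting the un-escaped path (pipes.quote on Python 2, shlex.quote on Python 3):
import pipes  # Python 2; use shlex.quote on Python 3
os.system('{0} {1} &'.format(ARGS.player, pipes.quote(path)))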
248,753 | amaas-fintech/amaas-utils-python | amaasutils/random_utils.py | random_string | def random_string(length, numeric_only=False):
"""
Generates a random string of length equal to the length parameter
"""
choices = string.digits if numeric_only else string.ascii_uppercase + string.digits
return ''.join(random.choice(choices) for _ in range(length)) | python | def random_string(length, numeric_only=False):
"""
Generates a random string of length equal to the length parameter
"""
choices = string.digits if numeric_only else string.ascii_uppercase + string.digits
return ''.join(random.choice(choices) for _ in range(length)) | [
"def",
"random_string",
"(",
"length",
",",
"numeric_only",
"=",
"False",
")",
":",
"choices",
"=",
"string",
".",
"digits",
"if",
"numeric_only",
"else",
"string",
".",
"ascii_uppercase",
"+",
"string",
".",
"digits",
"return",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"choices",
")",
"for",
"_",
"in",
"range",
"(",
"length",
")",
")"
] | Generates a random string of length equal to the length parameter | [
"Generates",
"a",
"random",
"string",
"of",
"length",
"equal",
"to",
"the",
"length",
"parameter"
] | 5aa64ca65ce0c77b513482d943345d94c9ae58e8 | https://github.com/amaas-fintech/amaas-utils-python/blob/5aa64ca65ce0c77b513482d943345d94c9ae58e8/amaasutils/random_utils.py#L10-L15 |
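A quick usage sketch; the sample outputs only show the shape, since results are random:
random_string(8)                      # e.g. 'Q3ZP81XK' (uppercase letters and digits)
random_string(6, numeric_only=True)   # e.g. '492017' (digits only)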
248,754 | amaas-fintech/amaas-utils-python | amaasutils/random_utils.py | random_date | def random_date(start_year=2000, end_year=2020):
"""
Generates a random "sensible" date for use in things like issue dates and maturities
"""
return date(random.randint(start_year, end_year), random.randint(1, 12), random.randint(1, 28)) | python | def random_date(start_year=2000, end_year=2020):
"""
Generates a random "sensible" date for use in things like issue dates and maturities
"""
return date(random.randint(start_year, end_year), random.randint(1, 12), random.randint(1, 28)) | [
"def",
"random_date",
"(",
"start_year",
"=",
"2000",
",",
"end_year",
"=",
"2020",
")",
":",
"return",
"date",
"(",
"random",
".",
"randint",
"(",
"start_year",
",",
"end_year",
")",
",",
"random",
".",
"randint",
"(",
"1",
",",
"12",
")",
",",
"random",
".",
"randint",
"(",
"1",
",",
"28",
")",
")"
] | Generates a random "sensible" date for use in things like issue dates and maturities | [
"Generates",
"a",
"random",
"sensible",
"date",
"for",
"use",
"in",
"things",
"like",
"issue",
"dates",
"and",
"maturities"
] | 5aa64ca65ce0c77b513482d943345d94c9ae58e8 | https://github.com/amaas-fintech/amaas-utils-python/blob/5aa64ca65ce0c77b513482d943345d94c9ae58e8/amaasutils/random_utils.py#L25-L29 |
248,755 | scott-maddox/simpleqw | src/simpleqw/_finite_well.py | _finite_well_energy | def _finite_well_energy(P, n=1, atol=1e-6):
'''
Returns the nth bound-state energy for a finite-potential quantum well
with the given well-strength parameter, `P`.
'''
assert n > 0 and n <= _finite_well_states(P)
pi_2 = pi / 2.
r = (1 / (P + pi_2)) * (n * pi_2)
eta = n * pi_2 - arcsin(r) - r * P
w = 1 # relaxation parameter (for successive relaxation)
while True:
assert r <= 1
if abs(eta) < atol:
break
r2 = r ** 2.
sqrt_1mr2 = sqrt(1. - r2)
denom = (1. + P * sqrt_1mr2)
t1 = P * sqrt_1mr2 / denom * eta
# t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2
while True:
next_r = (1 - w) * r + w * (r + t1)
# next_r = (1 - w) * r + w * (r + t1 + t2)
next_eta = n * pi_2 - arcsin(next_r) - next_r * P
# decrease w until eta is converging
if abs(next_eta / eta) < 1:
r = next_r
eta = next_eta
break
else:
w *= 0.5
alpha = P * r
E = 2 * (alpha) ** 2 # hbar**2 / (m * L**2)
return E | python | def _finite_well_energy(P, n=1, atol=1e-6):
'''
Returns the nth bound-state energy for a finite-potential quantum well
with the given well-strength parameter, `P`.
'''
assert n > 0 and n <= _finite_well_states(P)
pi_2 = pi / 2.
r = (1 / (P + pi_2)) * (n * pi_2)
eta = n * pi_2 - arcsin(r) - r * P
w = 1 # relaxation parameter (for successive relaxation)
while True:
assert r <= 1
if abs(eta) < atol:
break
r2 = r ** 2.
sqrt_1mr2 = sqrt(1. - r2)
denom = (1. + P * sqrt_1mr2)
t1 = P * sqrt_1mr2 / denom * eta
# t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2
while True:
next_r = (1 - w) * r + w * (r + t1)
# next_r = (1 - w) * r + w * (r + t1 + t2)
next_eta = n * pi_2 - arcsin(next_r) - next_r * P
# decrease w until eta is converging
if abs(next_eta / eta) < 1:
r = next_r
eta = next_eta
break
else:
w *= 0.5
alpha = P * r
E = 2 * (alpha) ** 2 # hbar**2 / (m * L**2)
return E | [
"def",
"_finite_well_energy",
"(",
"P",
",",
"n",
"=",
"1",
",",
"atol",
"=",
"1e-6",
")",
":",
"assert",
"n",
">",
"0",
"and",
"n",
"<=",
"_finite_well_states",
"(",
"P",
")",
"pi_2",
"=",
"pi",
"/",
"2.",
"r",
"=",
"(",
"1",
"/",
"(",
"P",
"+",
"pi_2",
")",
")",
"*",
"(",
"n",
"*",
"pi_2",
")",
"eta",
"=",
"n",
"*",
"pi_2",
"-",
"arcsin",
"(",
"r",
")",
"-",
"r",
"*",
"P",
"w",
"=",
"1",
"# relaxation parameter (for succesive relaxation)",
"while",
"True",
":",
"assert",
"r",
"<=",
"1",
"if",
"abs",
"(",
"eta",
")",
"<",
"atol",
":",
"break",
"r2",
"=",
"r",
"**",
"2.",
"sqrt_1mr2",
"=",
"sqrt",
"(",
"1.",
"-",
"r2",
")",
"denom",
"=",
"(",
"1.",
"+",
"P",
"*",
"sqrt_1mr2",
")",
"t1",
"=",
"P",
"*",
"sqrt_1mr2",
"/",
"denom",
"*",
"eta",
"# t2 = -r * P / (2 * (1. + P * sqrt_1mr2) ** 3) * eta ** 2",
"while",
"True",
":",
"next_r",
"=",
"(",
"1",
"-",
"w",
")",
"*",
"r",
"+",
"w",
"*",
"(",
"r",
"+",
"t1",
")",
"# next_r = (1 - w) * r + w * (r + t1 + t2)",
"next_eta",
"=",
"n",
"*",
"pi_2",
"-",
"arcsin",
"(",
"next_r",
")",
"-",
"next_r",
"*",
"P",
"# decrease w until eta is converging",
"if",
"abs",
"(",
"next_eta",
"/",
"eta",
")",
"<",
"1",
":",
"r",
"=",
"next_r",
"eta",
"=",
"next_eta",
"break",
"else",
":",
"w",
"*=",
"0.5",
"alpha",
"=",
"P",
"*",
"r",
"E",
"=",
"2",
"*",
"(",
"alpha",
")",
"**",
"2",
"# hbar**2 / (m * L**2)",
"return",
"E"
] | Returns the nth bound-state energy for a finite-potential quantum well
with the given well-strength parameter, `P`. | [
"Returns",
"the",
"nth",
"bound",
"-",
"state",
"energy",
"for",
"a",
"finite",
"-",
"potential",
"quantum",
"well",
"with",
"the",
"given",
"well",
"-",
"strength",
"parameter",
"P",
"."
] | 83c1c7ff1f0bac9ddeb6f00fcbb8fafe6ec97f6b | https://github.com/scott-maddox/simpleqw/blob/83c1c7ff1f0bac9ddeb6f00fcbb8fafe6ec97f6b/src/simpleqw/_finite_well.py#L46-L79 |
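The loop above solves the bound-state condition n*pi/2 - arcsin(r) - r*P = 0 by successive relaxation. A sketch that checks a returned energy by inverting E = 2*alpha**2 and substituting r = alpha/P back in; this module appears to take arcsin/sqrt/pi from numpy, which is an assumption here:
from numpy import arcsin, sqrt, pi
P = 5.0                        # illustrative well-strength parameter
E = _finite_well_energy(P, n=1)
r = sqrt(E / 2.0) / P          # since E = 2*(P*r)**2
residual = pi / 2.0 - arcsin(r) - r * P
assert abs(residual) < 1e-5    # comfortably within the default atol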
248,756 | Pringley/spyglass | spyglass/scraper.py | Scraper.top | def top(self, n=10, cache=None, prefetch=False):
"""Find the most popular torrents.
Return an array of Torrent objects representing the top n torrents. If
the cache option is non-None, override the Scraper's default caching
settings.
Use the prefetch option to hit each Torrent's info page up front
(instead of lazy fetching the info on-demand later).
"""
use_cache = self._use_cache(cache)
if use_cache and len(self._top_cache) >= n:
return self._top_cache[:n]
soup = get(TOP).soup
links = soup.find_all("a", class_="detLink")[:n]
urls = [urlparse.urljoin(TOP, link.get('href')) for link in links]
torrents = [self.torrent_from_url(url, use_cache, prefetch)
for url in urls]
if use_cache:
self._top_cache = torrents
self._add_to_torrent_cache(torrents)
return torrents | python | def top(self, n=10, cache=None, prefetch=False):
"""Find the most popular torrents.
Return an array of Torrent objects representing the top n torrents. If
the cache option is non-None, override the Scraper's default caching
settings.
Use the prefetch option to hit each Torrent's info page up front
(instead of lazy fetching the info on-demand later).
"""
use_cache = self._use_cache(cache)
if use_cache and len(self._top_cache) >= n:
return self._top_cache[:n]
soup = get(TOP).soup
links = soup.find_all("a", class_="detLink")[:n]
urls = [urlparse.urljoin(TOP, link.get('href')) for link in links]
torrents = [self.torrent_from_url(url, use_cache, prefetch)
for url in urls]
if use_cache:
self._top_cache = torrents
self._add_to_torrent_cache(torrents)
return torrents | [
"def",
"top",
"(",
"self",
",",
"n",
"=",
"10",
",",
"cache",
"=",
"None",
",",
"prefetch",
"=",
"False",
")",
":",
"use_cache",
"=",
"self",
".",
"_use_cache",
"(",
"cache",
")",
"if",
"use_cache",
"and",
"len",
"(",
"self",
".",
"_top_cache",
")",
">=",
"n",
":",
"return",
"self",
".",
"_top_cache",
"[",
":",
"n",
"]",
"soup",
"=",
"get",
"(",
"TOP",
")",
".",
"soup",
"links",
"=",
"soup",
".",
"find_all",
"(",
"\"a\"",
",",
"class_",
"=",
"\"detLink\"",
")",
"[",
":",
"n",
"]",
"urls",
"=",
"[",
"urlparse",
".",
"urljoin",
"(",
"TOP",
",",
"link",
".",
"get",
"(",
"'href'",
")",
")",
"for",
"link",
"in",
"links",
"]",
"torrents",
"=",
"[",
"self",
".",
"torrent_from_url",
"(",
"url",
",",
"use_cache",
",",
"prefetch",
")",
"for",
"url",
"in",
"urls",
"]",
"if",
"use_cache",
":",
"self",
".",
"_top_cache",
"=",
"torrents",
"self",
".",
"_add_to_torrent_cache",
"(",
"torrents",
")",
"return",
"torrents"
] | Find the most popular torrents.
Return an array of Torrent objects representing the top n torrents. If
the cache option is non-None, override the Scraper's default caching
settings.
Use the prefetch option to hit each Torrent's info page up front
(instead of lazy fetching the info on-demand later). | [
"Find",
"the",
"most",
"popular",
"torrents",
"."
] | 091d74f34837673af936daa9f462ad8216be9916 | https://github.com/Pringley/spyglass/blob/091d74f34837673af936daa9f462ad8216be9916/spyglass/scraper.py#L15-L37 |
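A usage sketch; network access to the scraped site is required, and the Scraper constructor defaults are assumed:
scraper = Scraper()
torrents = scraper.top(n=5)   # scrapes and caches the result
same = scraper.top(n=5)       # served from _top_cache on the second call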
248,757 | Pringley/spyglass | spyglass/scraper.py | Scraper.torrent_from_url | def torrent_from_url(self, url, cache=True, prefetch=False):
"""Create a Torrent object from a given URL.
If the cache option is set, check to see if we already have a Torrent
object representing it. If prefetch is set, automatically query the
torrent's info page to fill in the torrent object. (If prefetch is
false, then the torrent page will be queried lazily on-demand.)
"""
if self._use_cache(cache) and url in self._torrent_cache:
return self._torrent_cache[url]
torrent = Torrent(url, cache, prefetch)
if cache:
self._torrent_cache[url] = torrent
return torrent | python | def torrent_from_url(self, url, cache=True, prefetch=False):
"""Create a Torrent object from a given URL.
If the cache option is set, check to see if we already have a Torrent
object representing it. If prefetch is set, automatically query the
torrent's info page to fill in the torrent object. (If prefetch is
false, then the torrent page will be queried lazily on-demand.)
"""
if self._use_cache(cache) and url in self._torrent_cache:
return self._torrent_cache[url]
torrent = Torrent(url, cache, prefetch)
if cache:
self._torrent_cache[url] = torrent
return torrent | [
"def",
"torrent_from_url",
"(",
"self",
",",
"url",
",",
"cache",
"=",
"True",
",",
"prefetch",
"=",
"False",
")",
":",
"if",
"self",
".",
"_use_cache",
"(",
"cache",
")",
"and",
"url",
"in",
"self",
".",
"_torrent_cache",
":",
"return",
"self",
".",
"_torrent_cache",
"[",
"url",
"]",
"torrent",
"=",
"Torrent",
"(",
"url",
",",
"cache",
",",
"prefetch",
")",
"if",
"cache",
":",
"self",
".",
"_torrent_cache",
"[",
"url",
"]",
"=",
"torrent",
"return",
"torrent"
] | Create a Torrent object from a given URL.
If the cache option is set, check to see if we already have a Torrent
object representing it. If prefetch is set, automatically query the
torrent's info page to fill in the torrent object. (If prefetch is
false, then the torrent page will be queried lazily on-demand.) | [
"Create",
"a",
"Torrent",
"object",
"from",
"a",
"given",
"URL",
"."
] | 091d74f34837673af936daa9f462ad8216be9916 | https://github.com/Pringley/spyglass/blob/091d74f34837673af936daa9f462ad8216be9916/spyglass/scraper.py#L61-L75 |
248,758 | frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232._send_command | def _send_command(self, command, immediate=False, timeout=1.0,
check_echo=None):
""" Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive.
"""
# Use the default echo checking if None was given.
if check_echo is None:
check_echo = self._check_echo
# Convert to bytes and then strip comments, whitespace, and
# newlines.
if sys.hexversion >= 0x03000000:
c = bytes(command, encoding='ASCII')
else:
c = command
c = c.split(b';')[0].strip()
# If the command is supposed to be immediate, ensure that it
# starts with an '!'.
if immediate and not c.startswith(b'!'):
c = b'!' + c
# Read out any junk on the serial port before we start.
self._ser.read(self._ser.inWaiting())
# The command needs to be written a character at a time with
# pauses between them to make sure nothing gets lost or
# corrupted. This is a simple loop if we are not checking the
# echo. If we are, it is more complicated.
if not check_echo:
for i in range(0, len(c)):
self._ser.write(bytes([c[i]]))
time.sleep(0.01)
else:
# Infinite timeouts need to be converted to None. Finite
# ones need to be checked to make sure they are not too big,
# which is threading.TIMEOUT_MAX on Python 3.x and not
# specified on Python 2.x (let's use a week).
if timeout is None or timeout <= 0:
timeout = None
else:
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. Then, the
# timer is started.
tm = threading.Timer(timeout, lambda : None)
tm.start()
# Each character needs to be written one by one while the
# echo is collected. If any mistakes occur, they need to be
# corrected with backspaces b'\x08'. The echo starts out
# empty. We go until either the echo is identical to the
# command or the timeout is exceeded.
echo = b''
while c != echo and tm.is_alive():
# If there are no mistakes, then echo will be the
# beginning of c meaning the next character can be
# written. Otherwise, there is a mistake and a backspace
# needs to be written.
if c.startswith(echo):
self._ser.write(bytes([c[len(echo)]]))
else:
self._ser.write(b'\x08')
# Pause for a bit to make sure nothing gets lost. Then
# read the drive's output and add it to the echo.
time.sleep(0.01)
echo += self._ser.read(self._ser.inWaiting())
# All backspaces in echo need to be processed. Each
# backspace deletes itself and the character before it
# (if any).
while b'\x08' in echo:
index = echo.index(b'\x08')
if index == 0:
echo = echo[1:]
else:
echo = echo[0:(index-1)] + echo[(index+1):]
# Turn off the timer in the case that it is still running
# (command completely written before timeout).
tm.cancel()
# Write the carriage return to enter the command and then return
# the sanitized command.
self._ser.write(b'\r')
if sys.hexversion >= 0x03000000:
return c.decode(errors='replace')
else:
return c | python | def _send_command(self, command, immediate=False, timeout=1.0,
check_echo=None):
""" Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive.
"""
# Use the default echo checking if None was given.
if check_echo is None:
check_echo = self._check_echo
# Convert to bytes and then strip comments, whitespace, and
# newlines.
if sys.hexversion >= 0x03000000:
c = bytes(command, encoding='ASCII')
else:
c = command
c = c.split(b';')[0].strip()
# If the command is supposed to be immediate, ensure that it
# starts with an '!'.
if immediate and not c.startswith(b'!'):
c = b'!' + c
# Read out any junk on the serial port before we start.
self._ser.read(self._ser.inWaiting())
# The command needs to be written a character at a time with
# pauses between them to make sure nothing gets lost or
# corrupted. This is a simple loop if we are not checking the
# echo. If we are, it is more complicated.
if not check_echo:
for i in range(0, len(c)):
self._ser.write(bytes([c[i]]))
time.sleep(0.01)
else:
# Infinite timeouts need to be converted to None. Finite
# ones need to be checked to make sure they are not too big,
# which is threading.TIMEOUT_MAX on Python 3.x and not
# specified on Python 2.x (let's use a week).
if timeout is None or timeout <= 0:
timeout = None
else:
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. Then, the
# timer is started.
tm = threading.Timer(timeout, lambda : None)
tm.start()
# Each character needs to be written one by one while the
# echo is collected. If any mistakes occur, they need to be
# corrected with backspaces b'\x08'. The echo starts out
# empty. We go until either the echo is identical to the
# command or the timeout is exceeded.
echo = b''
while c != echo and tm.is_alive():
# If there are no mistakes, then echo will be the
# beginning of c meaning the next character can be
# written. Otherwise, there is a mistake and a backspace
# needs to be written.
if c.startswith(echo):
self._ser.write(bytes([c[len(echo)]]))
else:
self._ser.write(b'\x08')
# Pause for a bit to make sure nothing gets lost. Then
# read the drive's output and add it to the echo.
time.sleep(0.01)
echo += self._ser.read(self._ser.inWaiting())
# All backspaces in echo need to be processed. Each
# backspace deletes itself and the character before it
# (if any).
while b'\x08' in echo:
index = echo.index(b'\x08')
if index == 0:
echo = echo[1:]
else:
echo = echo[0:(index-1)] + echo[(index+1):]
# Turn off the timer in the case that it is still running
# (command completely written before timeout).
tm.cancel()
# Write the carriage return to enter the command and then return
# the sanitized command.
self._ser.write(b'\r')
if sys.hexversion >= 0x03000000:
return c.decode(errors='replace')
else:
return c | [
"def",
"_send_command",
"(",
"self",
",",
"command",
",",
"immediate",
"=",
"False",
",",
"timeout",
"=",
"1.0",
",",
"check_echo",
"=",
"None",
")",
":",
"# Use the default echo checking if None was given.",
"if",
"check_echo",
"is",
"None",
":",
"check_echo",
"=",
"self",
".",
"_check_echo",
"# Convert to bytes and then strip comments, whitespace, and",
"# newlines.",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"c",
"=",
"bytes",
"(",
"command",
",",
"encoding",
"=",
"'ASCII'",
")",
"else",
":",
"c",
"=",
"command",
"c",
"=",
"c",
".",
"split",
"(",
"b';'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"# If the command is supposed to be immediate, insure that it",
"# starts with an '!'.",
"if",
"immediate",
"and",
"not",
"c",
".",
"startswith",
"(",
"b'!'",
")",
":",
"c",
"=",
"b'!'",
"+",
"c",
"# Read out any junk on the serial port before we start.",
"self",
".",
"_ser",
".",
"read",
"(",
"self",
".",
"_ser",
".",
"inWaiting",
"(",
")",
")",
"# The command needs to be written a character at a time with",
"# pauses between them to make sure nothing gets lost or",
"# corrupted. This is a simple loop if we are not checking the",
"# echo. If we are, it is more complicated.",
"if",
"not",
"check_echo",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"c",
")",
")",
":",
"self",
".",
"_ser",
".",
"write",
"(",
"bytes",
"(",
"[",
"c",
"[",
"i",
"]",
"]",
")",
")",
"time",
".",
"sleep",
"(",
"0.01",
")",
"else",
":",
"# Infinite timeouts need to be converted to None. Finite",
"# ones need to be checked to make sure they are not too big,",
"# which is threading.TIMEOUT_MAX on Python 3.x and not",
"# specified on Python 2.x (lets use a week).",
"if",
"timeout",
"is",
"None",
"or",
"timeout",
"<=",
"0",
":",
"timeout",
"=",
"None",
"else",
":",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"maxtimeout",
"=",
"threading",
".",
"TIMEOUT_MAX",
"else",
":",
"maxtimeout",
"=",
"7",
"*",
"24",
"*",
"3600",
"timeout",
"=",
"min",
"(",
"timeout",
",",
"maxtimeout",
")",
"# A timer will be made that takes timeout to finish. Then,",
"# it is a matter of checking whether it is alive or not to",
"# know whether the timeout was exceeded or not. Then, the",
"# timer is started.",
"tm",
"=",
"threading",
".",
"Timer",
"(",
"timeout",
",",
"lambda",
":",
"None",
")",
"tm",
".",
"start",
"(",
")",
"# Each character needs to be written one by one while the",
"# echo is collected. If any mistakes occur, they need to be",
"# corrected with backspaces b'\\x08'. The echo starts out",
"# empty. We go until either the echo is identical to the",
"# command or the timeout is exceeded.",
"echo",
"=",
"b''",
"while",
"c",
"!=",
"echo",
"and",
"tm",
".",
"is_alive",
"(",
")",
":",
"# If there are no mistakes, then echo will be the",
"# beginning of c meaning the next character can be",
"# written. Otherwise, there is a mistake and a backspace",
"# needs to be written.",
"if",
"c",
".",
"startswith",
"(",
"echo",
")",
":",
"self",
".",
"_ser",
".",
"write",
"(",
"bytes",
"(",
"[",
"c",
"[",
"len",
"(",
"echo",
")",
"]",
"]",
")",
")",
"else",
":",
"self",
".",
"_ser",
".",
"write",
"(",
"b'\\x08'",
")",
"# Pause for a bit to make sure nothing gets lost. Then",
"# read the drive's output add it to the echo.",
"time",
".",
"sleep",
"(",
"0.01",
")",
"echo",
"+=",
"self",
".",
"_ser",
".",
"read",
"(",
"self",
".",
"_ser",
".",
"inWaiting",
"(",
")",
")",
"# All backspaces in echo need to be processed. Each",
"# backspace deletes itself and the character before it",
"# (if any).",
"while",
"b'\\x08'",
"in",
"echo",
":",
"index",
"=",
"echo",
".",
"index",
"(",
"b'\\x08'",
")",
"if",
"index",
"==",
"0",
":",
"echo",
"=",
"echo",
"[",
"1",
":",
"]",
"else",
":",
"echo",
"=",
"echo",
"[",
"0",
":",
"(",
"index",
"-",
"1",
")",
"]",
"+",
"echo",
"[",
"(",
"index",
"+",
"1",
")",
":",
"]",
"# Turn off the timer in the case that it is still running",
"# (command completely written before timeout).",
"tm",
".",
"cancel",
"(",
")",
"# Write the carriage return to enter the command and then return",
"# the sanitized command.",
"self",
".",
"_ser",
".",
"write",
"(",
"b'\\r'",
")",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"return",
"c",
".",
"decode",
"(",
"errors",
"=",
"'replace'",
")",
"else",
":",
"return",
"c"
] | Send a single command to the drive after sanitizing it.
Takes a single given `command`, sanitizes it (strips out
comments, extra whitespace, and newlines), sends the command to
the drive, and returns the sanitized command. The validity of
the command is **NOT** checked.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : number, optional
Optional timeout in seconds to use to get the command right
when we are doing echo checking. A negative value or
``None`` indicates that an infinite timeout should be
used.
check_echo : bool or None, optional
Whether the echoing of the command as it is being written to
the drive should be used to correct mistakes in what the
drive is seeing, or whether the default set when the
instance of this class was created should be used
(``None``).
Returns
-------
sanitized_command : str
The sanitized command that was sent to the drive. | [
"Send",
"a",
"single",
"command",
"to",
"the",
"drive",
"after",
"sanitizing",
"it",
"."
] | 8de347ffb91228fbfe3832098b4996fa0141d8f1 | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L127-L251 |
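A standalone sketch of the backspace handling used in the echo loop above: each b'\x08' removes itself and the preceding character, if any.
def strip_backspaces(echo):
    # Mirrors the inner while-loop of _send_command.
    while b'\x08' in echo:
        index = echo.index(b'\x08')
        if index == 0:
            echo = echo[1:]
        else:
            echo = echo[0:(index - 1)] + echo[(index + 1):]
    return echo

assert strip_backspaces(b'AXB\x08\x08B') == b'AB'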
248,759 | frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232._get_response | def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (let's use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf | python | def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (let's use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf | [
"def",
"_get_response",
"(",
"self",
",",
"timeout",
"=",
"1.0",
",",
"eor",
"=",
"(",
"'\\n'",
",",
"'\\n- '",
")",
")",
":",
"# If no timeout is given or it is invalid and we are using '\\n'",
"# as the eor, use the wrapper to read a line with an infinite",
"# timeout. Otherwise, the reading and timeout must be",
"# implemented manually.",
"if",
"(",
"timeout",
"is",
"None",
"or",
"timeout",
"<",
"0",
")",
"and",
"eor",
"==",
"'\\n'",
":",
"return",
"self",
".",
"_sio",
".",
"readline",
"(",
")",
"else",
":",
"# A timer will be made that takes timeout to finish. Then,",
"# it is a matter of checking whether it is alive or not to",
"# know whether the timeout was exceeded or not. They need to",
"# be checked to make sure they are not too big, which is",
"# threading.TIMEOUT_MAX on Python 3.x and not specified on",
"# Python 2.x (lets use a week). Then, the timer is started.",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"maxtimeout",
"=",
"threading",
".",
"TIMEOUT_MAX",
"else",
":",
"maxtimeout",
"=",
"7",
"*",
"24",
"*",
"3600",
"timeout",
"=",
"min",
"(",
"timeout",
",",
"maxtimeout",
")",
"tm",
"=",
"threading",
".",
"Timer",
"(",
"timeout",
",",
"lambda",
":",
"None",
")",
"tm",
".",
"start",
"(",
")",
"# eor needs to be converted to bytes. If it is just an str,",
"# it needs to be wrapped in a tuple.",
"if",
"isinstance",
"(",
"eor",
",",
"str",
")",
":",
"eor",
"=",
"tuple",
"(",
"[",
"eor",
"]",
")",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"eor",
"=",
"[",
"s",
".",
"encode",
"(",
"encoding",
"=",
"'ASCII'",
")",
"for",
"s",
"in",
"eor",
"]",
"# Read from the serial port into buf until the EOR is found",
"# or the timer has stopped. A small pause is done each time",
"# so that this thread doesn't hog the CPU.",
"buf",
"=",
"b''",
"while",
"not",
"any",
"(",
"[",
"(",
"x",
"in",
"buf",
")",
"for",
"x",
"in",
"eor",
"]",
")",
"and",
"tm",
".",
"is_alive",
"(",
")",
":",
"time",
".",
"sleep",
"(",
"0.001",
")",
"buf",
"+=",
"self",
".",
"_ser",
".",
"read",
"(",
"self",
".",
"_ser",
".",
"inWaiting",
"(",
")",
")",
"# Just in case the timer has not stopped (EOR was found),",
"# stop it.",
"tm",
".",
"cancel",
"(",
")",
"# Remove anything after the EOR if there is one. First, a",
"# set of matches (index, eor_str) for each string in eor",
"# needs to be constructed. Sorting the matches by their",
"# index puts all the ones that were not found (index of -1)",
"# at the front. Then a list of bools that are True for each",
"# index that isn't -1 is made, converted to a bytes (True",
"# goes to b'\\x01' and False goes to b'\\x00'), and then the",
"# index of the first True value found. If it is not -1, then",
"# there was a successful match and all the characters are",
"# dropped after that eor_str.",
"matches",
"=",
"[",
"(",
"buf",
".",
"find",
"(",
"x",
")",
",",
"x",
")",
"for",
"x",
"in",
"eor",
"]",
"matches",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"index",
"=",
"bytes",
"(",
"[",
"x",
"[",
"0",
"]",
"!=",
"-",
"1",
"for",
"x",
"in",
"matches",
"]",
")",
".",
"find",
"(",
"b'\\x01'",
")",
"if",
"index",
"!=",
"-",
"1",
":",
"buf",
"=",
"buf",
"[",
":",
"(",
"matches",
"[",
"index",
"]",
"[",
"0",
"]",
"+",
"len",
"(",
"matches",
"[",
"index",
"]",
"[",
"1",
"]",
")",
")",
"]",
"# Convert to an str before returning.",
"if",
"sys",
".",
"hexversion",
">=",
"0x03000000",
":",
"return",
"buf",
".",
"decode",
"(",
"errors",
"=",
"'replace'",
")",
"else",
":",
"return",
"buf"
] | Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved. | [
"Reads",
"a",
"response",
"from",
"the",
"drive",
"."
] | 8de347ffb91228fbfe3832098b4996fa0141d8f1 | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L253-L338 |
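A standalone sketch of the earliest-EOR trimming trick used in `_get_response` above (Python 3; the buffer contents and EOR strings are invented for illustration):

```python
# Find the earliest end-of-response marker present in buf and drop
# everything after it -- the same steps as at the end of _get_response.
buf = b'DRIVE\r*DRIVE1\r\r\ntrailing junk'
eor = [b'\n', b'\n- ']

matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])       # unfound markers (index -1) sort first
# bytes([...]) turns the bools into b'\x00'/b'\x01'; find(b'\x01') picks the
# first marker that was actually found, i.e. the earliest match in buf.
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
    buf = buf[:matches[index][0] + len(matches[index][1])]
print(buf)  # b'DRIVE\r*DRIVE1\r\r\n'
```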
248,760 | frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232._process_response | def _process_response(self, response):
""" Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped).
"""
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines] | python | def _process_response(self, response):
""" Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped).
"""
# Strip the trailing newline and split the response into lines
# by carriage returns.
rsp_lines = response.rstrip('\r\n').split('\r')
# If we have at least one line, the first one is the echoed
# command. If available, it needs to be grabbed and that line
# removed from rsp_lines since it is just the echoing, not the
# actual response to the command. None will be used to denote a
# non-existent echo.
if len(rsp_lines) > 0:
echoed_command = rsp_lines[0]
del rsp_lines[0]
else:
echoed_command = None
# If the next line is one of the different possible error
# strings, then there was an error that must be grabbed (leading
# '*' is stripped). If there was an error, remove that line from
# the response. None will be used to denote the lack of an error.
if len(rsp_lines) > 0 and \
rsp_lines[0] in ('*INVALID_ADDRESS', '*INVALID_DATA', \
'*INVALID_DATA_HIGH', '*INVALID_DATA_LOW', \
'*UNDEFINED_LABEL'):
err = rsp_lines[0][1:]
del rsp_lines[0]
else:
err = None
return [response, echoed_command, err, rsp_lines] | [
"def",
"_process_response",
"(",
"self",
",",
"response",
")",
":",
"# Strip the trailing newline and split the response into lines",
"# by carriage returns.",
"rsp_lines",
"=",
"response",
".",
"rstrip",
"(",
"'\\r\\n'",
")",
".",
"split",
"(",
"'\\r'",
")",
"# If we have at least one line, the first one is the echoed",
"# command. If available, it needs to be grabbed and that line",
"# removed from rsp_lines since it is just the echoing, not the",
"# actual response to the command. None will be used to denote a",
"# non-existent echo.",
"if",
"len",
"(",
"rsp_lines",
")",
">",
"0",
":",
"echoed_command",
"=",
"rsp_lines",
"[",
"0",
"]",
"del",
"rsp_lines",
"[",
"0",
"]",
"else",
":",
"echoed_command",
"=",
"None",
"# If the next line is one of the different possible error",
"# strings, then there was an error that must be grabbed (leading",
"# '*' is stripped). If there was an error, remove that line from",
"# the response. None will be used to denote the lack of an error.",
"if",
"len",
"(",
"rsp_lines",
")",
">",
"0",
"and",
"rsp_lines",
"[",
"0",
"]",
"in",
"(",
"'*INVALID_ADDRESS'",
",",
"'*INVALID_DATA'",
",",
"'*INVALID_DATA_HIGH'",
",",
"'*INVALID_DATA_LOW'",
",",
"'*UNDEFINED_LABEL'",
")",
":",
"err",
"=",
"rsp_lines",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"del",
"rsp_lines",
"[",
"0",
"]",
"else",
":",
"err",
"=",
"None",
"return",
"[",
"response",
",",
"echoed_command",
",",
"err",
",",
"rsp_lines",
"]"
] | Processes a response from the drive.
Processes the response returned from the drive. It is broken
down into the echoed command (drive echoes it back), any error
returned by the drive (leading '*' is stripped), and the
different lines of the response.
Parameters
----------
response : str
The response returned by the drive.
Returns
-------
processed_response : list
A 4-element ``list``. The elements, in order, are `response`
(``str``), the echoed command (``str``), any error response
(``None`` if none, or the ``str`` of the error), and the
lines of the response that are not the echo or error line
(``list`` of ``str`` with newlines stripped). | [
"Processes",
"a",
"response",
"from",
"the",
"drive",
"."
] | 8de347ffb91228fbfe3832098b4996fa0141d8f1 | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L340-L391 |
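The same parsing steps in isolation, applied to a hand-written drive response (the response string below is invented; compare the 'DRIV1' doctest in the next record):

```python
response = 'DRIV1\r*UNDEFINED_LABEL\r\r\n'   # echoed typo plus an error line

rsp_lines = response.rstrip('\r\n').split('\r')   # ['DRIV1', '*UNDEFINED_LABEL']
echoed_command = rsp_lines.pop(0) if rsp_lines else None

errors = ('*INVALID_ADDRESS', '*INVALID_DATA', '*INVALID_DATA_HIGH',
          '*INVALID_DATA_LOW', '*UNDEFINED_LABEL')
err = rsp_lines.pop(0)[1:] if rsp_lines and rsp_lines[0] in errors else None

print([response, echoed_command, err, rsp_lines])
# ['DRIV1\r*UNDEFINED_LABEL\r\r\n', 'DRIV1', 'UNDEFINED_LABEL', []]
```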
248,761 | frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232.send_command | def send_command(self, command, immediate=False, timeout=1.0,
max_retries=0, eor=('\n', '\n- ')):
""" Sends a single command to the drive and returns output.
Takes a single given `command`, sanitizes it, sends it to the
drive, reads the response, and returns the processed response.
The command is first sanitized by removing comments, extra
whitespace, and newline characters. If `immediate` is set, the
command is made to be an immediate command. Note, the command is
**NOT** checked for validity. If the drive returns an error, the
command is re-executed up to `max_retries` more times. The
response from the final execution is processed and returned. The
response from the drive is broken down into the echoed command
(drive echoes it back), any error returned by the drive (leading
'*' is stripped), and the different lines of the response; which
are all returned.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
``str`` or an iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
See Also
--------
send_commands : Send multiple commands.
Examples
--------
Simple command energizing the motor with no response and no
errors.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]
Same command but made immediate.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)
['!DRIVE1', '!DRIVE1\\r\\r\\n', '!DRIVE1', None, []]
Same command with a typo.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
'UNDEFINED_LABEL', []]
Simple command asking whether the motor is energized or not.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE', immediate=False, timeout=1.0)
['DRIVE', 'DRIVE\\r*DRIVE1\\r\\r\\n', 'DRIVE', None,
['*DRIVE1']]
"""
# Execute the command till it either doesn't have an error or
# the maximum number of retries is exceeded.
for i in range(0, max_retries+1):
# Send the command and stuff the sanitized version in a
# list. Then process the response and add it to the list.
response = [self._send_command(command,
immediate=immediate)]
output = self._get_response(timeout=timeout, eor=eor)
# If echo checking was done, the echo was already grabbed,
# is identical to the command, and needs to be placed back
# in front of the output so that it can be processed
# properly.
if self._check_echo:
output = response[0] + output
response.extend(self._process_response(output))
# We are done if there is no error.
if not self.command_error(response):
break
# Put in a slight pause so the drive has a bit of breathing
# time between retries.
time.sleep(0.25)
return response | python | def send_command(self, command, immediate=False, timeout=1.0,
max_retries=0, eor=('\n', '\n- ')):
""" Sends a single command to the drive and returns output.
Takes a single given `command`, sanitizes it, sends it to the
drive, reads the response, and returns the processed response.
The command is first sanitized by removing comments, extra
whitespace, and newline characters. If `immediate` is set, the
command is made to be an immediate command. Note, the command is
**NOT** checked for validity. If the drive returns an error, the
command is re-executed up to `max_retries` more times. The
response from the final execution is processed and returned. The
response from the drive is broken down into the echoed command
(drive echoes it back), any error returned by the drive (leading
'*' is stripped), and the different lines of the response; which
are all returned.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
``str`` or an iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
See Also
--------
send_commands : Send multiple commands.
Examples
--------
Simple command energizing the motor with no response and no
errors.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]
Same command but made immediate.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)
['!DRIVE1', '!DRIVE1\\r\\r\\n', '!DRIVE1', None, []]
Same command with a typo.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
'UNDEFINED_LABEL', []]
Simple command asking whether the motor is energized or not.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE', immediate=False, timeout=1.0)
['DRIVE', 'DRIVE\\r*DRIVE1\\r\\r\\n', 'DRIVE', None,
['*DRIVE1']]
"""
# Execute the command till it either doesn't have an error or
# the maximum number of retries is exceeded.
for i in range(0, max_retries+1):
# Send the command and stuff the sanitized version in a
# list. Then process the response and add it to the list.
response = [self._send_command(command,
immediate=immediate)]
output = self._get_response(timeout=timeout, eor=eor)
# If echo checking was done, the echo was already grabbed,
# is identical to the command, and needs to be placed back
# in front of the output so that it can be processed
# properly.
if self._check_echo:
output = response[0] + output
response.extend(self._process_response(output))
# We are done if there is no error.
if not self.command_error(response):
break
# Put in a slight pause so the drive has a bit of breathing
# time between retries.
time.sleep(0.25)
return response | [
"def",
"send_command",
"(",
"self",
",",
"command",
",",
"immediate",
"=",
"False",
",",
"timeout",
"=",
"1.0",
",",
"max_retries",
"=",
"0",
",",
"eor",
"=",
"(",
"'\\n'",
",",
"'\\n- '",
")",
")",
":",
"# Execute the command till it either doesn't have an error or",
"# the maximum number of retries is exceeded.",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"max_retries",
"+",
"1",
")",
":",
"# Send the command and stuff the sanitized version in a",
"# list. Then process the response and add it to the list.",
"response",
"=",
"[",
"self",
".",
"_send_command",
"(",
"command",
",",
"immediate",
"=",
"immediate",
")",
"]",
"output",
"=",
"self",
".",
"_get_response",
"(",
"timeout",
"=",
"timeout",
",",
"eor",
"=",
"eor",
")",
"# If echo checking was done, the echo was already grabbed,",
"# is identical to the command, and needs to be placed back",
"# in front of the output so that it can be processed",
"# properly.",
"if",
"self",
".",
"_check_echo",
":",
"output",
"=",
"response",
"[",
"0",
"]",
"+",
"output",
"response",
".",
"extend",
"(",
"self",
".",
"_process_response",
"(",
"output",
")",
")",
"# We are done if there is no error.",
"if",
"not",
"self",
".",
"command_error",
"(",
"response",
")",
":",
"break",
"# Put in a slight pause so the drive has a bit of breathing",
"# time between retries.",
"time",
".",
"sleep",
"(",
"0.25",
")",
"return",
"response"
] | Sends a single command to the drive and returns output.
Takes a single given `command`, sanitizes it, sends it to the
drive, reads the response, and returns the processed response.
The command is first sanitized by removing comments, extra
whitespace, and newline characters. If `immediate` is set, the
command is made to be an immediate command. Note, the command is
**NOT** checked for validity. If the drive returns an error, the
command is re-executed up to `max_retries` more times. The
response from the final execution is processed and returned. The
response from the drive is broken down into the echoed command
(drive echoes it back), any error returned by the drive (leading
'*' is stripped), and the different lines of the response; which
are all returned.
Parameters
----------
command : str
The command to send to the Gemini drive.
immediate : bool, optional
Whether to make it so the command is executed immediately or
not.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
``str`` or an iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
output : list
A 5-element ``list``. The elements, in order, are the
sanitized command (``str``), the full response (``str``),
the echoed command (``str``), any error response (``None``
if none, or the ``str`` of the error), and the lines of the
response that are not the echo or error line (``list`` of
``str`` with newlines stripped).
See Also
--------
send_commands : Send multiple commands.
Examples
--------
Simple command energizing the motor with no response and no
errors.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=False, timeout=1.0)
['DRIVE1', 'DRIVE1\\r\\r\\n', 'DRIVE1', None, []]
Same command but made immediate.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE1', immediate=True, timeout=1.0)
['!DRIVE1', '!DRIVE1\\r\\r\\n', '!DRIVE1', None, []]
Same command with a typo.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIV1', immediate=False, timeout=1.0)
['DRIV1', 'DRIV1\\r*UNDEFINED_LABEL\\r\\r\\n', 'DRIV1',
'UNDEFINED_LABEL', []]
Simple command asking whether the motor is energized or not.
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ar = ASCII_RS232('/dev/ttyS1')
>>> ar.send_command('DRIVE', immediate=False, timeout=1.0)
['DRIVE', 'DRIVE\\r*DRIVE1\\r\\r\\n', 'DRIVE', None,
['*DRIVE1']] | [
"Sends",
"a",
"single",
"command",
"to",
"the",
"drive",
"and",
"returns",
"output",
"."
] | 8de347ffb91228fbfe3832098b4996fa0141d8f1 | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L420-L527 |
248,762 | frejanordsiek/GeminiMotorDrive | GeminiMotorDrive/drivers.py | ASCII_RS232.send_commands | def send_commands(self, commands, timeout=1.0,
max_retries=1, eor=('\n', '\n- ')):
""" Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
response indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Response. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]]
"""
# If eor is not a list, make a list of it replicated enough for
# every command.
if not isinstance(eor, list):
eor = [eor]*len(commands)
# Do every command one by one, collecting the responses and
# stuffing them in a list. Commands that failed are retried, and
# we stop if the last retry is exhausted.
responses = []
for i, command in enumerate(commands):
rsp = self.send_command(command, timeout=timeout,
max_retries=max_retries,
eor=eor[i])
responses.append(rsp)
if self.command_error(rsp):
break
# Put in a slight pause so the drive has a bit of breathing
# time between commands.
time.sleep(0.25)
return responses | python | def send_commands(self, commands, timeout=1.0,
max_retries=1, eor=('\n', '\n- ')):
""" Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
response indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Response. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]]
"""
# If eor is not a list, make a list of it replicated enough for
# every command.
if not isinstance(eor, list):
eor = [eor]*len(commands)
# Do every command one by one, collecting the responses and
# stuffing them in a list. Commands that failed are retried, and
# we stop if the last retry is exhausted.
responses = []
for i, command in enumerate(commands):
rsp = self.send_command(command, timeout=timeout,
max_retries=max_retries,
eor=eor[i])
responses.append(rsp)
if self.command_error(rsp):
break
# Put in a slight pause so the drive has a bit of breathing
# time between commands.
time.sleep(0.25)
return responses | [
"def",
"send_commands",
"(",
"self",
",",
"commands",
",",
"timeout",
"=",
"1.0",
",",
"max_retries",
"=",
"1",
",",
"eor",
"=",
"(",
"'\\n'",
",",
"'\\n- '",
")",
")",
":",
"# If eor is not a list, make a list of it replicated enough for",
"# every command.",
"if",
"not",
"isinstance",
"(",
"eor",
",",
"list",
")",
":",
"eor",
"=",
"[",
"eor",
"]",
"*",
"len",
"(",
"commands",
")",
"# Do every command one by one, collecting the responses and",
"# stuffing them in a list. Commands that failed are retried, and",
"# we stop if the last retry is exhausted.",
"responses",
"=",
"[",
"]",
"for",
"i",
",",
"command",
"in",
"enumerate",
"(",
"commands",
")",
":",
"rsp",
"=",
"self",
".",
"send_command",
"(",
"command",
",",
"timeout",
"=",
"timeout",
",",
"max_retries",
"=",
"max_retries",
",",
"eor",
"=",
"eor",
"[",
"i",
"]",
")",
"responses",
".",
"append",
"(",
"rsp",
")",
"if",
"self",
".",
"command_error",
"(",
"rsp",
")",
":",
"break",
"# Put in a slight pause so the drive has a bit of breathing",
"# time between commands.",
"time",
".",
"sleep",
"(",
"0.25",
")",
"return",
"responses"
] | Send a sequence of commands to the drive and collect output.
Takes a sequence of many commands and executes them one by one
till either all are executed or one runs out of retries
(`max_retries`). Retries are optionally performed if a command's
response indicates that there was an error. Remaining commands
are not executed. The processed output of the final execution
(last try or retry) of each command that was actually executed
is returned.
This function basically feeds commands one by one to
``send_command`` and collates the outputs.
Parameters
----------
commands : iterable of str
Iterable of commands to send to the drive. Each command must
be an ``str``.
timeout : float or None, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that an
infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
eor : str or iterable of str, optional
End Of Response. An EOR is either a ``str`` or an iterable
of ``str`` that denote the possible endings of a response.
'eor' can be a single EOR, in which case it is used for all
commands, or it can be an iterable of EOR to use for each
individual command. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
outputs : list of lists
``list`` composed of the processed responses of each command
in the order that they were done up to and including the
last command executed. See ``send_command`` for the format
of processed responses.
See Also
--------
send_command : Send a single command.
Examples
--------
A sequence of commands to energize the motor, move it a bit away
from the starting position, and then do 4 forward/reverse
cycles, and de-energize the motor. **DO NOT** try these specific
movement distances without checking that the motion won't damage
something (very motor and application specific).
>>> from GeminiMotorDrive.drivers import ASCII_RS232
>>> ra = ASCII_RS232('/dev/ttyS1')
>>> ra.send_commands(['DRIVE1', 'D-10000', 'GO']
... + ['D-10000','GO','D10000','GO']*4
... + [ 'DRIVE0'])
[['DRIVE1', 'DRIVE1\\r', 'DRIVE1', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D-10000', 'D-10000\\r', 'D-10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['D10000', 'D10000\\r', 'D10000', None, []],
['GO', 'GO\\r', 'GO', None, []],
['DRIVE0', 'DRIVE0\\r', 'DRIVE0', None, []]] | [
"Send",
"a",
"sequence",
"of",
"commands",
"to",
"the",
"drive",
"and",
"collect",
"output",
"."
] | 8de347ffb91228fbfe3832098b4996fa0141d8f1 | https://github.com/frejanordsiek/GeminiMotorDrive/blob/8de347ffb91228fbfe3832098b4996fa0141d8f1/GeminiMotorDrive/drivers.py#L529-L631 |
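A toy rendering of the send_commands control flow, with a fake `send` standing in for ASCII_RS232.send_command (command names and the canned error are invented):

```python
def send(command):
    # canned 5-element response; only the typo 'DRIV1' reports an error
    err = 'UNDEFINED_LABEL' if command == 'DRIV1' else None
    return [command, command + '\r', command, err, []]

responses = []
for command in ['DRIVE1', 'DRIV1', 'GO']:
    rsp = send(command)
    responses.append(rsp)
    if rsp[3] is not None:   # stop at the first error; 'GO' never runs
        break
print(len(responses))        # 2
```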
248,763 | tagcubeio/tagcube-cli | tagcube/client/api.py | TagCubeClient.low_level_scan | def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
post data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data) | python | def low_level_scan(self, verification_resource, scan_profile_resource,
path_list, notification_resource_list):
"""
Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
post data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id
"""
data = {"verification_href": verification_resource.href,
"profile_href": scan_profile_resource.href,
"start_time": "now",
"email_notifications_href": [n.href for n in notification_resource_list],
"path_list": path_list}
url = self.build_full_url('/scans/')
return self.create_resource(url, data) | [
"def",
"low_level_scan",
"(",
"self",
",",
"verification_resource",
",",
"scan_profile_resource",
",",
"path_list",
",",
"notification_resource_list",
")",
":",
"data",
"=",
"{",
"\"verification_href\"",
":",
"verification_resource",
".",
"href",
",",
"\"profile_href\"",
":",
"scan_profile_resource",
".",
"href",
",",
"\"start_time\"",
":",
"\"now\"",
",",
"\"email_notifications_href\"",
":",
"[",
"n",
".",
"href",
"for",
"n",
"in",
"notification_resource_list",
"]",
",",
"\"path_list\"",
":",
"path_list",
"}",
"url",
"=",
"self",
".",
"build_full_url",
"(",
"'/scans/'",
")",
"return",
"self",
".",
"create_resource",
"(",
"url",
",",
"data",
")"
] | Low level implementation of the scan launch which allows you to start
a new scan when you already know the ids for the required resources.
:param verification_resource: The verification associated with the
domain resource to scan
:param scan_profile_resource: The scan profile resource
:param path_list: A list with the paths
:param notification_resource_list: The notifications to use
All the *_resource* parameters are obtained by calling the respective
getters such as:
- get_email_notification
- get_scan_profile
And are expected to be of Resource type
This method's last step is to send a POST request to /1.0/scans/ using
post data similar to:
{"verification_href": "/1.0/verifications/6",
"profile_href": "/1.0/profiles/2",
"start_time": "now",
"email_notifications_href": [],
"path_list": ["/"]}'
:return: The newly generated scan id | [
"Low",
"level",
"implementation",
"of",
"the",
"scan",
"launch",
"which",
"allows",
"you",
"to",
"start",
"a",
"new",
"scan",
"when",
"you",
"already",
"know",
"the",
"ids",
"for",
"the",
"required",
"resources",
"."
] | 709e4b0b11331a4d2791dc79107e5081518d75bf | https://github.com/tagcubeio/tagcube-cli/blob/709e4b0b11331a4d2791dc79107e5081518d75bf/tagcube/client/api.py#L182-L218 |
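A hedged sketch of the POST body low_level_scan assembles; `SimpleResource` is a made-up stand-in for the client's Resource objects, which only need an `href` attribute here:

```python
from collections import namedtuple

SimpleResource = namedtuple('SimpleResource', 'href')

verification = SimpleResource('/1.0/verifications/6')
profile = SimpleResource('/1.0/profiles/2')
notifications = [SimpleResource('/1.0/notifications/1')]

data = {'verification_href': verification.href,
        'profile_href': profile.href,
        'start_time': 'now',
        'email_notifications_href': [n.href for n in notifications],
        'path_list': ['/']}
```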
248,764 | xgvargas/smartside | smartside/__init__.py | setAsApplication | def setAsApplication(myappid):
"""
Tells Windows this is an independent application with a unique icon on task bar.
myappid is a unique string to identify this application, like: 'mycompany.myproduct.subproduct.version'
"""
if os.name == 'nt':
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) | python | def setAsApplication(myappid):
"""
Tells Windows this is an independent application with a unique icon on task bar.
myappid is a unique string to identify this application, like: 'mycompany.myproduct.subproduct.version'
"""
if os.name == 'nt':
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) | [
"def",
"setAsApplication",
"(",
"myappid",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"import",
"ctypes",
"ctypes",
".",
"windll",
".",
"shell32",
".",
"SetCurrentProcessExplicitAppUserModelID",
"(",
"myappid",
")"
] | Tells Windows this is an independent application with a unique icon on task bar.
myappid is a unique string to identify this application, like: 'mycompany.myproduct.subproduct.version' | [
"Tells",
"Windows",
"this",
"is",
"an",
"independent",
"application",
"with",
"an",
"unique",
"icon",
"on",
"task",
"bar",
"."
] | c63acb7d628b161f438e877eca12d550647de34d | https://github.com/xgvargas/smartside/blob/c63acb7d628b161f438e877eca12d550647de34d/smartside/__init__.py#L11-L20 |
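A possible call site; the ID string is an invented example following the convention quoted in the docstring:

```python
# Call once at startup, before the main window is created; on
# non-Windows platforms the os.name guard makes this a no-op.
setAsApplication('mycompany.myproduct.subproduct.1.0')
```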
248,765 | xgvargas/smartside | smartside/__init__.py | getBestTranslation | def getBestTranslation(basedir, lang=None):
"""
Find inside basedir the best translation available.
lang, if defined, should be a list of preferred languages.
It will look for files in the form:
- en-US.qm
- en_US.qm
- en.qm
"""
if not lang:
lang = QtCore.QLocale.system().uiLanguages()
for l in lang:
l = l.translate({ord('_'): '-'})
f = os.path.join(basedir, l+'.qm')
if os.path.isfile(f): break
l = l.translate({ord('-'): '_'})
f = os.path.join(basedir, l+'.qm')
if os.path.isfile(f): break
l = l.split('_')[0]
f = os.path.join(basedir, l+'.qm')
if os.path.isfile(f): break
else:
return None
translator = QtCore.QTranslator()
translator.load(f)
return translator | python | def getBestTranslation(basedir, lang=None):
"""
Find inside basedir the best translation available.
lang, if defined, should be a list of preferred languages.
It will look for files in the form:
- en-US.qm
- en_US.qm
- en.qm
"""
if not lang:
lang = QtCore.QLocale.system().uiLanguages()
for l in lang:
l = l.translate({ord('_'): '-'})
f = os.path.join(basedir, l+'.qm')
if os.path.isfile(f): break
l = l.translate({ord('-'): '_'})
f = os.path.join(basedir, l+'.qm')
if os.path.isfile(f): break
l = l.split('_')[0]
f = os.path.join(basedir, l+'.qm')
if os.path.isfile(f): break
else:
return None
translator = QtCore.QTranslator()
translator.load(f)
return translator | [
"def",
"getBestTranslation",
"(",
"basedir",
",",
"lang",
"=",
"None",
")",
":",
"if",
"not",
"lang",
":",
"lang",
"=",
"QtCore",
".",
"QLocale",
".",
"system",
"(",
")",
".",
"uiLanguages",
"(",
")",
"for",
"l",
"in",
"lang",
":",
"l",
"=",
"l",
".",
"translate",
"(",
"{",
"ord",
"(",
"'_'",
")",
":",
"'-'",
"}",
")",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"l",
"+",
"'.qm'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"break",
"l",
"=",
"l",
".",
"translate",
"(",
"{",
"ord",
"(",
"'-'",
")",
":",
"'_'",
"}",
")",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"l",
"+",
"'.qm'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"break",
"l",
"=",
"l",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"l",
"+",
"'.qm'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"break",
"else",
":",
"return",
"None",
"translator",
"=",
"QtCore",
".",
"QTranslator",
"(",
")",
"translator",
".",
"load",
"(",
"f",
")",
"return",
"translator"
] | Find inside basedir the best translation available.
lang, if defined, should be a list of preferred languages.
It will look for files in the form:
- en-US.qm
- en_US.qm
- en.qm | [
"Find",
"inside",
"basedir",
"the",
"best",
"translation",
"available",
"."
] | c63acb7d628b161f438e877eca12d550647de34d | https://github.com/xgvargas/smartside/blob/c63acb7d628b161f438e877eca12d550647de34d/smartside/__init__.py#L23-L56 |
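A hedged usage sketch: the 'i18n' directory and the language list are assumptions, and `app` is taken to be an existing QApplication:

```python
translator = getBestTranslation('i18n', lang=['pt-BR', 'en'])
if translator is not None:
    app.installTranslator(translator)   # standard Qt translator hookup
```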
248,766 | davisd50/sparc.cache | sparc/cache/sources/normalize.py | normalizedFieldNameCachableItemMixin.normalize | def normalize(cls, name):
"""Return string in all lower case with spaces and question marks removed"""
name = name.lower() # lower-case
for _replace in [' ','-','(',')','?']:
name = name.replace(_replace,'')
return name | python | def normalize(cls, name):
"""Return string in all lower case with spaces and question marks removed"""
name = name.lower() # lower-case
for _replace in [' ','-','(',')','?']:
name = name.replace(_replace,'')
return name | [
"def",
"normalize",
"(",
"cls",
",",
"name",
")",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"# lower-case",
"for",
"_replace",
"in",
"[",
"' '",
",",
"'-'",
",",
"'('",
",",
"')'",
",",
"'?'",
"]",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"_replace",
",",
"''",
")",
"return",
"name"
] | Return string in all lower case with spaces, hyphens, parentheses, and question marks removed | [
"Return",
"string",
"in",
"all",
"lower",
"case",
"with",
"spaces",
"and",
"question",
"marks",
"removed"
] | f2378aad48c368a53820e97b093ace790d4d4121 | https://github.com/davisd50/sparc.cache/blob/f2378aad48c368a53820e97b093ace790d4d4121/sparc/cache/sources/normalize.py#L67-L72 |
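The normalization traced on an invented column header:

```python
name = 'Phone Number (Home)?'
name = name.lower()                     # 'phone number (home)?'
for _replace in [' ', '-', '(', ')', '?']:
    name = name.replace(_replace, '')
print(name)                             # 'phonenumberhome'
```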
248,767 | abe-winter/pg13-py | pg13/threevl.py | ThreeVL.nein | def nein(x):
"this is 'not' but not is a keyword so it's 'nein'"
if not isinstance(x,(bool,ThreeVL)): raise TypeError(type(x))
return not x if isinstance(x,bool) else ThreeVL(dict(t='f',f='t',u='u')[x.value]) | python | def nein(x):
"this is 'not' but not is a keyword so it's 'nein'"
if not isinstance(x,(bool,ThreeVL)): raise TypeError(type(x))
return not x if isinstance(x,bool) else ThreeVL(dict(t='f',f='t',u='u')[x.value]) | [
"def",
"nein",
"(",
"x",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"(",
"bool",
",",
"ThreeVL",
")",
")",
":",
"raise",
"TypeError",
"(",
"type",
"(",
"x",
")",
")",
"return",
"not",
"x",
"if",
"isinstance",
"(",
"x",
",",
"bool",
")",
"else",
"ThreeVL",
"(",
"dict",
"(",
"t",
"=",
"'f'",
",",
"f",
"=",
"'t'",
",",
"u",
"=",
"'u'",
")",
"[",
"x",
".",
"value",
"]",
")"
] | this is 'not' but not is a keyword so it's 'nein' | [
"this",
"is",
"not",
"but",
"not",
"is",
"a",
"keyword",
"so",
"it",
"s",
"nein"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/threevl.py#L24-L27 |
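A quick check of the three-valued NOT; since `nein` takes no `self` it is presumably a staticmethod, and 'u' models SQL's unknown/NULL:

```python
assert ThreeVL.nein(True) is False              # plain bools use Python's not
assert ThreeVL.nein(ThreeVL('t')).value == 'f'
assert ThreeVL.nein(ThreeVL('u')).value == 'u'  # unknown stays unknown
```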
248,768 | abe-winter/pg13-py | pg13/threevl.py | ThreeVL.compare | def compare(operator,a,b):
"this could be replaced by overloading but I want == to return a bool for 'in' use"
# todo(awinter): what about nested 3vl like "(a=b)=(c=d)". is that allowed by sql? It will choke here if there's a null involved.
f=({'=':lambda a,b:a==b,'!=':lambda a,b:a!=b,'>':lambda a,b:a>b,'<':lambda a,b:a<b}[operator])
return ThreeVL('u') if None in (a,b) else f(a,b) | python | def compare(operator,a,b):
"this could be replaced by overloading but I want == to return a bool for 'in' use"
# todo(awinter): what about nested 3vl like "(a=b)=(c=d)". is that allowed by sql? It will choke here if there's a null involved.
f=({'=':lambda a,b:a==b,'!=':lambda a,b:a!=b,'>':lambda a,b:a>b,'<':lambda a,b:a<b}[operator])
return ThreeVL('u') if None in (a,b) else f(a,b) | [
"def",
"compare",
"(",
"operator",
",",
"a",
",",
"b",
")",
":",
"# todo(awinter): what about nested 3vl like \"(a=b)=(c=d)\". is that allowed by sql? It will choke here if there's a null involved.",
"f",
"=",
"(",
"{",
"'='",
":",
"lambda",
"a",
",",
"b",
":",
"a",
"==",
"b",
",",
"'!='",
":",
"lambda",
"a",
",",
"b",
":",
"a",
"!=",
"b",
",",
"'>'",
":",
"lambda",
"a",
",",
"b",
":",
"a",
">",
"b",
",",
"'<'",
":",
"lambda",
"a",
",",
"b",
":",
"a",
"<",
"b",
"}",
"[",
"operator",
"]",
")",
"return",
"ThreeVL",
"(",
"'u'",
")",
"if",
"None",
"in",
"(",
"a",
",",
"b",
")",
"else",
"f",
"(",
"a",
",",
"b",
")"
] | this could be replaced by overloading but I want == to return a bool for 'in' use | [
"this",
"could",
"be",
"replaced",
"by",
"overloading",
"but",
"I",
"want",
"==",
"to",
"return",
"a",
"bool",
"for",
"in",
"use"
] | c78806f99f35541a8756987e86edca3438aa97f5 | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/threevl.py#L41-L45 |
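How `compare` propagates SQL-style NULLs (again assuming a staticmethod): any `None` operand yields unknown, otherwise a plain bool comes back:

```python
assert ThreeVL.compare('<', 1, 2) is True
assert ThreeVL.compare('=', 'a', 'b') is False
assert ThreeVL.compare('=', None, 'b').value == 'u'   # NULL = x is unknown
```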
248,769 | PSU-OIT-ARC/django-cloak | cloak/views.py | login | def login(request, signature):
"""
Automatically logs in a user based on a signed PK of a user object. The
signature should be generated with the `login` management command.
The signature will only work for 60 seconds.
"""
signer = TimestampSigner()
try:
pk = signer.unsign(signature, max_age=MAX_AGE_OF_SIGNATURE_IN_SECONDS)
except (BadSignature, SignatureExpired) as e:
return HttpResponseForbidden("Can't log you in")
user = get_object_or_404(get_user_model(), pk=pk)
# we *have* to set the backend for this user, so we just use the first one
user.backend = settings.AUTHENTICATION_BACKENDS[0]
django_login(request, user)
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) | python | def login(request, signature):
"""
Automatically logs in a user based on a signed PK of a user object. The
signature should be generated with the `login` management command.
The signature will only work for 60 seconds.
"""
signer = TimestampSigner()
try:
pk = signer.unsign(signature, max_age=MAX_AGE_OF_SIGNATURE_IN_SECONDS)
except (BadSignature, SignatureExpired) as e:
return HttpResponseForbidden("Can't log you in")
user = get_object_or_404(get_user_model(), pk=pk)
# we *have* to set the backend for this user, so we just use the first one
user.backend = settings.AUTHENTICATION_BACKENDS[0]
django_login(request, user)
return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL) | [
"def",
"login",
"(",
"request",
",",
"signature",
")",
":",
"signer",
"=",
"TimestampSigner",
"(",
")",
"try",
":",
"pk",
"=",
"signer",
".",
"unsign",
"(",
"signature",
",",
"max_age",
"=",
"MAX_AGE_OF_SIGNATURE_IN_SECONDS",
")",
"except",
"(",
"BadSignature",
",",
"SignatureExpired",
")",
"as",
"e",
":",
"return",
"HttpResponseForbidden",
"(",
"\"Can't log you in\"",
")",
"user",
"=",
"get_object_or_404",
"(",
"get_user_model",
"(",
")",
",",
"pk",
"=",
"pk",
")",
"# we *have* to set the backend for this user, so we just use the first one",
"user",
".",
"backend",
"=",
"settings",
".",
"AUTHENTICATION_BACKENDS",
"[",
"0",
"]",
"django_login",
"(",
"request",
",",
"user",
")",
"return",
"HttpResponseRedirect",
"(",
"settings",
".",
"LOGIN_REDIRECT_URL",
")"
] | Automatically logs in a user based on a signed PK of a user object. The
signature should be generated with the `login` management command.
The signature will only work for 60 seconds. | [
"Automatically",
"logs",
"in",
"a",
"user",
"based",
"on",
"a",
"signed",
"PK",
"of",
"a",
"user",
"object",
".",
"The",
"signature",
"should",
"be",
"generated",
"with",
"the",
"login",
"management",
"command",
"."
] | 3f09711837f4fe7b1813692daa064e536135ffa3 | https://github.com/PSU-OIT-ARC/django-cloak/blob/3f09711837f4fe7b1813692daa064e536135ffa3/cloak/views.py#L12-L30 |
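The producing side of this handshake: a sketch of what the `login` management command presumably generates (the helper name is invented; only TimestampSigner and the user's pk are involved):

```python
from django.core.signing import TimestampSigner

def make_login_signature(user):
    # the view above unsigns this with a 60-second max age, per its docstring
    return TimestampSigner().sign(str(user.pk))
```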
248,770 | PSU-OIT-ARC/django-cloak | cloak/views.py | cloak | def cloak(request, pk=None):
"""
Masquerade as a particular user and redirect based on the
REDIRECT_FIELD_NAME parameter, or the LOGIN_REDIRECT_URL.
Callers can either pass the pk of the user in the URL itself, or as a POST
param.
"""
pk = request.POST.get('pk', pk)
if pk is None:
return HttpResponse("You need to pass a pk POST parameter, or include it in the URL")
user = get_object_or_404(get_user_model(), pk=pk)
if not can_cloak_as(request.user, user):
return HttpResponseForbidden("You are not allowed to cloak as this user")
request.session[SESSION_USER_KEY] = user.pk
# save the referer information so when uncloaking, we can redirect the user
# back to where they were
request.session[SESSION_REDIRECT_KEY] = request.META.get("HTTP_REFERER", settings.LOGIN_REDIRECT_URL)
# redirect the cloaked user to the URL specified in the "next" parameter,
# or to the default redirect URL
return HttpResponseRedirect(request.POST.get(REDIRECT_FIELD_NAME, settings.LOGIN_REDIRECT_URL)) | python | def cloak(request, pk=None):
"""
Masquerade as a particular user and redirect based on the
REDIRECT_FIELD_NAME parameter, or the LOGIN_REDIRECT_URL.
Callers can either pass the pk of the user in the URL itself, or as a POST
param.
"""
pk = request.POST.get('pk', pk)
if pk is None:
return HttpResponse("You need to pass a pk POST parameter, or include it in the URL")
user = get_object_or_404(get_user_model(), pk=pk)
if not can_cloak_as(request.user, user):
return HttpResponseForbidden("You are not allowed to cloak as this user")
request.session[SESSION_USER_KEY] = user.pk
# save the referer information so when uncloaking, we can redirect the user
# back to where they were
request.session[SESSION_REDIRECT_KEY] = request.META.get("HTTP_REFERER", settings.LOGIN_REDIRECT_URL)
# redirect the cloaked user to the URL specified in the "next" parameter,
# or to the default redirect URL
return HttpResponseRedirect(request.POST.get(REDIRECT_FIELD_NAME, settings.LOGIN_REDIRECT_URL)) | [
"def",
"cloak",
"(",
"request",
",",
"pk",
"=",
"None",
")",
":",
"pk",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'pk'",
",",
"pk",
")",
"if",
"pk",
"is",
"None",
":",
"return",
"HttpResponse",
"(",
"\"You need to pass a pk POST parameter, or include it in the URL\"",
")",
"user",
"=",
"get_object_or_404",
"(",
"get_user_model",
"(",
")",
",",
"pk",
"=",
"pk",
")",
"if",
"not",
"can_cloak_as",
"(",
"request",
".",
"user",
",",
"user",
")",
":",
"return",
"HttpResponseForbidden",
"(",
"\"You are not allowed to cloak as this user\"",
")",
"request",
".",
"session",
"[",
"SESSION_USER_KEY",
"]",
"=",
"user",
".",
"pk",
"# save the referer information so when uncloaking, we can redirect the user",
"# back to where they were",
"request",
".",
"session",
"[",
"SESSION_REDIRECT_KEY",
"]",
"=",
"request",
".",
"META",
".",
"get",
"(",
"\"HTTP_REFERER\"",
",",
"settings",
".",
"LOGIN_REDIRECT_URL",
")",
"# redirect the cloaked user to the URL specified in the \"next\" parameter,",
"# or to the default redirect URL",
"return",
"HttpResponseRedirect",
"(",
"request",
".",
"POST",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"settings",
".",
"LOGIN_REDIRECT_URL",
")",
")"
] | Masquerade as a particular user and redirect based on the
REDIRECT_FIELD_NAME parameter, or the LOGIN_REDIRECT_URL.
Callers can either pass the pk of the user in the URL itself, or as a POST
param. | [
"Masquerade",
"as",
"a",
"particular",
"user",
"and",
"redirect",
"based",
"on",
"the",
"REDIRECT_FIELD_NAME",
"parameter",
"or",
"the",
"LOGIN_REDIRECT_URL",
"."
] | 3f09711837f4fe7b1813692daa064e536135ffa3 | https://github.com/PSU-OIT-ARC/django-cloak/blob/3f09711837f4fe7b1813692daa064e536135ffa3/cloak/views.py#L34-L58 |
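One way the view might be wired up in an era-appropriate Django URLconf; the patterns and names below are assumptions, not part of the package:

```python
from django.conf.urls import url
from cloak.views import cloak

urlpatterns = [
    url(r'^cloak/(?P<pk>\d+)/$', cloak, name='cloak'),
    url(r'^cloak/$', cloak, name='cloak-post'),  # pk supplied as POST data
]
```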
248,771 | chrisdrackett/django-support | support/templatetags/form_tags.py | select_template_from_string | def select_template_from_string(arg):
"""
Select a template from a string, which can include multiple
template paths separated by commas.
"""
if ',' in arg:
tpl = loader.select_template(
[tn.strip() for tn in arg.split(',')])
else:
tpl = loader.get_template(arg)
return tpl | python | def select_template_from_string(arg):
"""
Select a template from a string, which can include multiple
template paths separated by commas.
"""
if ',' in arg:
tpl = loader.select_template(
[tn.strip() for tn in arg.split(',')])
else:
tpl = loader.get_template(arg)
return tpl | [
"def",
"select_template_from_string",
"(",
"arg",
")",
":",
"if",
"','",
"in",
"arg",
":",
"tpl",
"=",
"loader",
".",
"select_template",
"(",
"[",
"tn",
".",
"strip",
"(",
")",
"for",
"tn",
"in",
"arg",
".",
"split",
"(",
"','",
")",
"]",
")",
"else",
":",
"tpl",
"=",
"loader",
".",
"get_template",
"(",
"arg",
")",
"return",
"tpl"
] | Select a template from a string, which can include multiple
template paths separated by commas. | [
"Select",
"a",
"template",
"from",
"a",
"string",
"which",
"can",
"include",
"multiple",
"template",
"paths",
"separated",
"by",
"commas",
"."
] | a4f29421a31797e0b069637a0afec85328b4f0ca | https://github.com/chrisdrackett/django-support/blob/a4f29421a31797e0b069637a0afec85328b4f0ca/support/templatetags/form_tags.py#L6-L17 |
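Behaviour sketch with invented template names: a comma-separated string falls back through the candidates, a single name loads directly:

```python
tpl = select_template_from_string('overrides/form.html, defaults/form.html')
tpl = select_template_from_string('defaults/form.html')
```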
248,772 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/utils/plugins.py | PluginLoader._get_package_path | def _get_package_path(self):
"""Gets the path of a Python package"""
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
return [self.package_path] | python | def _get_package_path(self):
"""Gets the path of a Python package"""
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
return [self.package_path] | [
"def",
"_get_package_path",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"package",
":",
"return",
"[",
"]",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'package_path'",
")",
":",
"m",
"=",
"__import__",
"(",
"self",
".",
"package",
")",
"parts",
"=",
"self",
".",
"package",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
"self",
".",
"package_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"m",
".",
"__file__",
")",
",",
"*",
"parts",
")",
"return",
"[",
"self",
".",
"package_path",
"]"
] | Gets the path of a Python package | [
"Gets",
"the",
"path",
"of",
"a",
"Python",
"package"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/plugins.py#L47-L55 |
248,773 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/utils/plugins.py | PluginLoader._get_paths | def _get_paths(self):
"""Return a list of paths to search for plugins in
The list is searched in order."""
ret = []
ret += ['%s/library/' % os.path.dirname(os.path.dirname(__file__))]
ret += self._extra_dirs
for basedir in _basedirs:
fullpath = os.path.join(basedir, self.subdir)
if fullpath not in ret:
ret.append(fullpath)
ret += self.config.split(os.pathsep)
ret += self._get_package_path()
return ret | python | def _get_paths(self):
"""Return a list of paths to search for plugins in
The list is searched in order."""
ret = []
ret += ['%s/library/' % os.path.dirname(os.path.dirname(__file__))]
ret += self._extra_dirs
for basedir in _basedirs:
fullpath = os.path.join(basedir, self.subdir)
if fullpath not in ret:
ret.append(fullpath)
ret += self.config.split(os.pathsep)
ret += self._get_package_path()
return ret | [
"def",
"_get_paths",
"(",
"self",
")",
":",
"ret",
"=",
"[",
"]",
"ret",
"+=",
"[",
"'%s/library/'",
"%",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"]",
"ret",
"+=",
"self",
".",
"_extra_dirs",
"for",
"basedir",
"in",
"_basedirs",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"self",
".",
"subdir",
")",
"if",
"fullpath",
"not",
"in",
"ret",
":",
"ret",
".",
"append",
"(",
"fullpath",
")",
"ret",
"+=",
"self",
".",
"config",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"ret",
"+=",
"self",
".",
"_get_package_path",
"(",
")",
"return",
"ret"
] | Return a list of paths to search for plugins in
The list is searched in order. | [
"Return",
"a",
"list",
"of",
"paths",
"to",
"search",
"for",
"plugins",
"in"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/plugins.py#L57-L71 |
248,774 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/utils/plugins.py | PluginLoader.print_paths | def print_paths(self):
"""Returns a string suitable for printing of the search path"""
# Uses a list to get the order right
ret = []
for i in self._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret) | python | def print_paths(self):
"""Returns a string suitable for printing of the search path"""
# Uses a list to get the order right
ret = []
for i in self._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret) | [
"def",
"print_paths",
"(",
"self",
")",
":",
"# Uses a list to get the order right",
"ret",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
".",
"_get_paths",
"(",
")",
":",
"if",
"i",
"not",
"in",
"ret",
":",
"ret",
".",
"append",
"(",
"i",
")",
"return",
"os",
".",
"pathsep",
".",
"join",
"(",
"ret",
")"
] | Returns a string suitable for printing of the search path | [
"Returns",
"a",
"string",
"suitable",
"for",
"printing",
"of",
"the",
"search",
"path"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/plugins.py#L78-L85 |
248,775 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/utils/plugins.py | PluginLoader.find_plugin | def find_plugin(self, name):
"""Find a plugin named name"""
suffix = ".py"
if not self.class_name:
suffix = ""
for i in self._get_paths():
path = os.path.join(i, "%s%s" % (name, suffix))
if os.path.exists(path):
return path
return None | python | def find_plugin(self, name):
"""Find a plugin named name"""
suffix = ".py"
if not self.class_name:
suffix = ""
for i in self._get_paths():
path = os.path.join(i, "%s%s" % (name, suffix))
if os.path.exists(path):
return path
return None | [
"def",
"find_plugin",
"(",
"self",
",",
"name",
")",
":",
"suffix",
"=",
"\".py\"",
"if",
"not",
"self",
".",
"class_name",
":",
"suffix",
"=",
"\"\"",
"for",
"i",
"in",
"self",
".",
"_get_paths",
"(",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"i",
",",
"\"%s%s\"",
"%",
"(",
"name",
",",
"suffix",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"path",
"return",
"None"
] | Find a plugin named name | [
"Find",
"a",
"plugin",
"named",
"name"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/utils/plugins.py#L87-L96 |
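The PluginLoader records above boil down to one pattern: build an ordered, de-duplicated search path, then probe each directory for "<name><suffix>". A standalone sketch with assumed directories (not Ansible's real configuration):

import os

def find_plugin(name, search_paths, suffix='.py'):
    # first hit on the ordered search path wins
    for directory in search_paths:
        candidate = os.path.join(directory, name + suffix)
        if os.path.exists(candidate):
            return candidate
    return None

paths = ['./plugins', os.path.expanduser('~/.plugins')]  # assumed dirs
print(find_plugin('copy', paths))  # None unless ./plugins/copy.py exists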
248,776 | praekelt/jmbo-calendar | jmbo_calendar/admin.py | EventAdmin.get_fieldsets | def get_fieldsets(self, *args, **kwargs):
"""Re-order fields"""
result = super(EventAdmin, self).get_fieldsets(*args, **kwargs)
result = list(result)
fields = list(result[0][1]['fields'])
for name in ('content', 'start', 'end', 'repeat', 'repeat_until', \
'external_link', 'calendars'):
fields.remove(name)
fields.append(name)
result[0][1]['fields'] = tuple(fields)
return tuple(result) | python | def get_fieldsets(self, *args, **kwargs):
"""Re-order fields"""
result = super(EventAdmin, self).get_fieldsets(*args, **kwargs)
result = list(result)
fields = list(result[0][1]['fields'])
for name in ('content', 'start', 'end', 'repeat', 'repeat_until', \
'external_link', 'calendars'):
fields.remove(name)
fields.append(name)
result[0][1]['fields'] = tuple(fields)
return tuple(result) | [
"def",
"get_fieldsets",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"super",
"(",
"EventAdmin",
",",
"self",
")",
".",
"get_fieldsets",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"result",
"=",
"list",
"(",
"result",
")",
"fields",
"=",
"list",
"(",
"result",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
")",
"for",
"name",
"in",
"(",
"'content'",
",",
"'start'",
",",
"'end'",
",",
"'repeat'",
",",
"'repeat_until'",
",",
"'external_link'",
",",
"'calendars'",
")",
":",
"fields",
".",
"remove",
"(",
"name",
")",
"fields",
".",
"append",
"(",
"name",
")",
"result",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"'fields'",
"]",
"=",
"tuple",
"(",
"fields",
")",
"return",
"tuple",
"(",
"result",
")"
] | Re-order fields | [
"Re",
"-",
"order",
"fields"
] | ac39f3ad4c155d6755ec5c5a51fb815268b8c18c | https://github.com/praekelt/jmbo-calendar/blob/ac39f3ad4c155d6755ec5c5a51fb815268b8c18c/jmbo_calendar/admin.py#L36-L46 |
248,777 | shaypal5/utilitime | utilitime/datetime/datetime.py | utc_offset_by_timezone | def utc_offset_by_timezone(timezone_name):
"""Returns the UTC offset of the given timezone in hours.
Arguments
---------
timezone_name: str
A string with a name of a timezone.
Returns
-------
int
The UTC offset of the given timezone, in hours.
"""
return int(pytz.timezone(timezone_name).utcoffset(
utc_time()).total_seconds()/SECONDS_IN_HOUR) | python | def utc_offset_by_timezone(timezone_name):
"""Returns the UTC offset of the given timezone in hours.
Arguments
---------
timezone_name: str
A string with a name of a timezone.
Returns
-------
int
The UTC offset of the given timezone, in hours.
"""
return int(pytz.timezone(timezone_name).utcoffset(
utc_time()).total_seconds()/SECONDS_IN_HOUR) | [
"def",
"utc_offset_by_timezone",
"(",
"timezone_name",
")",
":",
"return",
"int",
"(",
"pytz",
".",
"timezone",
"(",
"timezone_name",
")",
".",
"utcoffset",
"(",
"utc_time",
"(",
")",
")",
".",
"total_seconds",
"(",
")",
"/",
"SECONDS_IN_HOUR",
")"
] | Returns the UTC offset of the given timezone in hours.
Arguments
---------
timezone_name: str
A string with a name of a timezone.
Returns
-------
int
The UTC offset of the given timezone, in hours. | [
"Returns",
"the",
"UTC",
"offset",
"of",
"the",
"given",
"timezone",
"in",
"hours",
"."
] | 554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609 | https://github.com/shaypal5/utilitime/blob/554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609/utilitime/datetime/datetime.py#L29-L43 |
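The same computation as utc_offset_by_timezone with the pytz calls spelled out; utc_time() in the record presumably returns a naive UTC datetime, so datetime.utcnow() stands in for it here:

from datetime import datetime
import pytz

def utc_offset_hours(timezone_name):
    now = datetime.utcnow()  # naive UTC instant
    offset = pytz.timezone(timezone_name).utcoffset(now)
    return int(offset.total_seconds() // 3600)

print(utc_offset_hours('UTC'))           # 0
print(utc_offset_hours('Europe/Paris'))  # 1 or 2 depending on DST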
248,778 | shaypal5/utilitime | utilitime/datetime/datetime.py | localize_datetime | def localize_datetime(datetime_obj, timezone_name):
"""Localizes the given UTC-aligned datetime by the given timezone.
Arguments
---------
datetime_obj : datetime.datetime
A datetime object depicting a specific point in time, aligned by UTC.
timezone_name: str
A string with a name of a timezone.
Returns
-------
datetime.datetime
A datetime object aligned by the given timezone.
"""
return datetime_obj.replace(tzinfo=pytz.utc).astimezone(
pytz.timezone(timezone_name)) | python | def localize_datetime(datetime_obj, timezone_name):
"""Localizes the given UTC-aligned datetime by the given timezone.
Arguments
---------
datetime_obj : datetime.datetime
A datetime object depicting a specific point in time, aligned by UTC.
timezone_name: str
A string with a name of a timezone.
Returns
-------
datetime.datetime
A datetime object aligned by the given timezone.
"""
return datetime_obj.replace(tzinfo=pytz.utc).astimezone(
pytz.timezone(timezone_name)) | [
"def",
"localize_datetime",
"(",
"datetime_obj",
",",
"timezone_name",
")",
":",
"return",
"datetime_obj",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
".",
"astimezone",
"(",
"pytz",
".",
"timezone",
"(",
"timezone_name",
")",
")"
] | Localizes the given UTC-aligned datetime by the given timezone.
Arguments
---------
datetime_obj : datetime.datetime
A datetime object depicting a specific point in time, aligned by UTC.
timezone_name: str
A string with a name of a timezone.
Returns
-------
datetime.datetime
A datetime object aligned by the given timezone. | [
"Localizes",
"the",
"given",
"UTC",
"-",
"aligned",
"datetime",
"by",
"the",
"given",
"timezone",
"."
] | 554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609 | https://github.com/shaypal5/utilitime/blob/554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609/utilitime/datetime/datetime.py#L46-L62 |
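A usage sketch of the replace-then-astimezone idiom above: attach UTC to a naive UTC timestamp, then convert it to a target timezone:

from datetime import datetime
import pytz

naive_utc = datetime(2020, 6, 1, 12, 0, 0)
local = naive_utc.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('Asia/Tokyo'))
print(local.isoformat())  # 2020-06-01T21:00:00+09:00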
248,779 | eallik/spinoff | spinoff/remoting/pickler.py | IncomingMessageUnpickler._load_build | def _load_build(self):
"""See `pickle.py` in Python's source code."""
# if the ctor. function (penultimate on the stack) is the `Ref` class...
if isinstance(self.stack[-2], Ref):
# Ref.__setstate__ will know it's a remote ref if the state is a tuple
self.stack[-1] = (self.stack[-1], self.node)
self.load_build() # continue with the default implementation
# detect our own refs sent back to us
ref = self.stack[-1]
if ref.uri.node == self.node.nid:
ref.is_local = True
ref._cell = self.node.guardian.lookup_cell(ref.uri)
# dbg(("dead " if not ref._cell else "") + "local ref detected")
del ref.node # local refs never need access to the node
else: # pragma: no cover
self.load_build() | python | def _load_build(self):
"""See `pickle.py` in Python's source code."""
# if the ctor. function (penultimate on the stack) is the `Ref` class...
if isinstance(self.stack[-2], Ref):
# Ref.__setstate__ will know it's a remote ref if the state is a tuple
self.stack[-1] = (self.stack[-1], self.node)
self.load_build() # continue with the default implementation
# detect our own refs sent back to us
ref = self.stack[-1]
if ref.uri.node == self.node.nid:
ref.is_local = True
ref._cell = self.node.guardian.lookup_cell(ref.uri)
# dbg(("dead " if not ref._cell else "") + "local ref detected")
del ref.node # local refs never need access to the node
else: # pragma: no cover
self.load_build() | [
"def",
"_load_build",
"(",
"self",
")",
":",
"# if the ctor. function (penultimate on the stack) is the `Ref` class...",
"if",
"isinstance",
"(",
"self",
".",
"stack",
"[",
"-",
"2",
"]",
",",
"Ref",
")",
":",
"# Ref.__setstate__ will know it's a remote ref if the state is a tuple",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"=",
"(",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
",",
"self",
".",
"node",
")",
"self",
".",
"load_build",
"(",
")",
"# continue with the default implementation",
"# detect our own refs sent back to us",
"ref",
"=",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"if",
"ref",
".",
"uri",
".",
"node",
"==",
"self",
".",
"node",
".",
"nid",
":",
"ref",
".",
"is_local",
"=",
"True",
"ref",
".",
"_cell",
"=",
"self",
".",
"node",
".",
"guardian",
".",
"lookup_cell",
"(",
"ref",
".",
"uri",
")",
"# dbg((\"dead \" if not ref._cell else \"\") + \"local ref detected\")",
"del",
"ref",
".",
"node",
"# local refs never need access to the node",
"else",
":",
"# pragma: no cover",
"self",
".",
"load_build",
"(",
")"
] | See `pickle.py` in Python's source code. | [
"See",
"pickle",
".",
"py",
"in",
"Python",
"s",
"source",
"code",
"."
] | 06b00d6b86c7422c9cb8f9a4b2915906e92b7d52 | https://github.com/eallik/spinoff/blob/06b00d6b86c7422c9cb8f9a4b2915906e92b7d52/spinoff/remoting/pickler.py#L17-L34 |
248,780 | edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/structures/db/shared.py | path_to_zip | def path_to_zip(path):
"""
Compress `path` to the ZIP.
Args:
path (str): Path to the directory.
Returns:
str: Path to the zipped file (in /tmp).
"""
if not os.path.exists(path):
raise IOError("%s doesn't exist!" % path)
with tempfile.NamedTemporaryFile(delete=False) as ntf:
zip_fn = ntf.name
with zipfile.ZipFile(zip_fn, mode="w") as zip_file:
for root, dirs, files in os.walk(path):
for fn in files:
zip_file.write(os.path.join(root, fn))
return zip_fn | python | def path_to_zip(path):
"""
Compress `path` to the ZIP.
Args:
path (str): Path to the directory.
Returns:
str: Path to the zipped file (in /tmp).
"""
if not os.path.exists(path):
raise IOError("%s doesn't exist!" % path)
with tempfile.NamedTemporaryFile(delete=False) as ntf:
zip_fn = ntf.name
with zipfile.ZipFile(zip_fn, mode="w") as zip_file:
for root, dirs, files in os.walk(path):
for fn in files:
zip_file.write(os.path.join(root, fn))
return zip_fn | [
"def",
"path_to_zip",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"IOError",
"(",
"\"%s doesn't exists!\"",
"%",
"path",
")",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
")",
"as",
"ntf",
":",
"zip_fn",
"=",
"ntf",
".",
"name",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zip_fn",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"zip_file",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"fn",
"in",
"files",
":",
"zip_file",
".",
"write",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fn",
")",
")",
"return",
"zip_fn"
] | Compress `path` to the ZIP.
Args:
path (str): Path to the directory.
Returns:
str: Path to the zipped file (in /tmp). | [
"Compress",
"path",
"to",
"the",
"ZIP",
"."
] | fb6bd326249847de04b17b64e856c878665cea92 | https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/structures/db/shared.py#L14-L35 |
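A stdlib-only round trip of the zipping helper above. Note that, like the record, zf.write() is called without an arcname, so archive members keep their full on-disk path:

import os, tempfile, zipfile

src = tempfile.mkdtemp()
with open(os.path.join(src, 'a.txt'), 'w') as f:
    f.write('hello')

with tempfile.NamedTemporaryFile(suffix='.zip', delete=False) as ntf:
    zip_fn = ntf.name
with zipfile.ZipFile(zip_fn, mode='w') as zf:
    for root, dirs, files in os.walk(src):
        for fn in files:
            zf.write(os.path.join(root, fn))  # no arcname: full path is stored

print(zipfile.ZipFile(zip_fn).namelist())  # one entry ending in a.txt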
248,781 | edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/structures/db/shared.py | read_as_base64 | def read_as_base64(fn):
"""
Convert given `fn` to base64 and return it. This method does the process
in a memory-efficient way.
Args:
fn (str): Path to the file which should be converted.
Returns:
str: File encoded as base64.
"""
with open(fn) as unpacked_file:
with tempfile.TemporaryFile() as b64_file:
base64.encode(unpacked_file, b64_file)
b64_file.flush()
b64_file.seek(0)
return b64_file.read() | python | def read_as_base64(fn):
"""
Convert given `fn` to base64 and return it. This method does the process
in a memory-efficient way.
Args:
fn (str): Path to the file which should be converted.
Returns:
str: File encoded as base64.
"""
with open(fn) as unpacked_file:
with tempfile.TemporaryFile() as b64_file:
base64.encode(unpacked_file, b64_file)
b64_file.flush()
b64_file.seek(0)
return b64_file.read() | [
"def",
"read_as_base64",
"(",
"fn",
")",
":",
"with",
"open",
"(",
"fn",
")",
"as",
"unpacked_file",
":",
"with",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"as",
"b64_file",
":",
"base64",
".",
"encode",
"(",
"unpacked_file",
",",
"b64_file",
")",
"b64_file",
".",
"flush",
"(",
")",
"b64_file",
".",
"seek",
"(",
"0",
")",
"return",
"b64_file",
".",
"read",
"(",
")"
] | Convert given `fn` to base64 and return it. This method does the process
in a memory-efficient way.
Args:
fn (str): Path to the file which should be converted.
Returns:
str: File encoded as base64. | [
"Convert",
"given",
"fn",
"to",
"base64",
"and",
"return",
"it",
".",
"This",
"method",
"does",
"the",
"process",
"in",
"not",
"-",
"so",
"-",
"much",
"memory",
"consuming",
"way",
"."
] | fb6bd326249847de04b17b64e856c878665cea92 | https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/structures/db/shared.py#L38-L55 |
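The streaming idea above in isolation: base64.encode() copies file-to-file in small chunks, so the whole input never sits in memory. Both handles must be binary on Python 3 (the record's text-mode open() suggests it targets Python 2):

import base64, tempfile

with tempfile.NamedTemporaryFile(delete=False) as src:
    src.write(b'x' * 1024)
    src_name = src.name

with open(src_name, 'rb') as unpacked, tempfile.TemporaryFile() as b64:
    base64.encode(unpacked, b64)  # chunked file-to-file encoding
    b64.seek(0)
    print(b64.read(8))  # b'eHh4eHh4'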
248,782 | hkff/FodtlMon | fodtlmon/tools/color.py | _pad_input | def _pad_input(incoming):
"""Avoid IndexError and KeyError by ignoring un-related fields.
Example: '{0}{autored}' becomes '{{0}}{autored}'.
Positional arguments:
incoming -- the input unicode value.
Returns:
Padded unicode value.
"""
incoming_expanded = incoming.replace('{', '{{').replace('}', '}}')
for key in _BASE_CODES:
before, after = '{{%s}}' % key, '{%s}' % key
if before in incoming_expanded:
incoming_expanded = incoming_expanded.replace(before, after)
return incoming_expanded | python | def _pad_input(incoming):
"""Avoid IndexError and KeyError by ignoring un-related fields.
Example: '{0}{autored}' becomes '{{0}}{autored}'.
Positional arguments:
incoming -- the input unicode value.
Returns:
Padded unicode value.
"""
incoming_expanded = incoming.replace('{', '{{').replace('}', '}}')
for key in _BASE_CODES:
before, after = '{{%s}}' % key, '{%s}' % key
if before in incoming_expanded:
incoming_expanded = incoming_expanded.replace(before, after)
return incoming_expanded | [
"def",
"_pad_input",
"(",
"incoming",
")",
":",
"incoming_expanded",
"=",
"incoming",
".",
"replace",
"(",
"'{'",
",",
"'{{'",
")",
".",
"replace",
"(",
"'}'",
",",
"'}}'",
")",
"for",
"key",
"in",
"_BASE_CODES",
":",
"before",
",",
"after",
"=",
"'{{%s}}'",
"%",
"key",
",",
"'{%s}'",
"%",
"key",
"if",
"before",
"in",
"incoming_expanded",
":",
"incoming_expanded",
"=",
"incoming_expanded",
".",
"replace",
"(",
"before",
",",
"after",
")",
"return",
"incoming_expanded"
] | Avoid IndexError and KeyError by ignoring un-related fields.
Example: '{0}{autored}' becomes '{{0}}{autored}'.
Positional arguments:
incoming -- the input unicode value.
Returns:
Padded unicode value. | [
"Avoid",
"IndexError",
"and",
"KeyError",
"by",
"ignoring",
"un",
"-",
"related",
"fields",
"."
] | 0c9015a1a1f0a4a64d52945c86b45441d5871c56 | https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/tools/color.py#L218-L234 |
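The padding behavior above, runnable in isolation; KNOWN_TAGS stands in for the real _BASE_CODES keys:

KNOWN_TAGS = ('red', '/red', 'autored')  # stand-in for _BASE_CODES keys

def pad_input(incoming):
    # double every brace, then un-double the recognized color tags
    expanded = incoming.replace('{', '{{').replace('}', '}}')
    for key in KNOWN_TAGS:
        expanded = expanded.replace('{{%s}}' % key, '{%s}' % key)
    return expanded

print(pad_input('{0}{autored}'))  # {{0}}{autored}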
248,783 | hkff/FodtlMon | fodtlmon/tools/color.py | _parse_input | def _parse_input(incoming):
"""Performs the actual conversion of tags to ANSI escaped codes.
Provides a version of the input without any colors for len() and other methods.
Positional arguments:
incoming -- the input unicode value.
Returns:
2-item tuple. First item is the parsed output. Second item is a version of the input without any colors.
"""
codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming)
color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items())
incoming_padded = _pad_input(incoming)
output_colors = incoming_padded.format(**color_codes)
# Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m'
groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent.
groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups]
groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes.
assert len(groups_compiled) == len(groups) # For testing.
output_colors_simplified = output_colors
for i in range(len(groups)):
output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i])
output_no_colors = _RE_SPLIT.sub('', output_colors_simplified)
# Strip any remaining color codes.
if _AutoCodes.DISABLE_COLORS:
output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified)
return output_colors_simplified, output_no_colors | python | def _parse_input(incoming):
"""Performs the actual conversion of tags to ANSI escaped codes.
Provides a version of the input without any colors for len() and other methods.
Positional arguments:
incoming -- the input unicode value.
Returns:
2-item tuple. First item is the parsed output. Second item is a version of the input without any colors.
"""
codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming)
color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items())
incoming_padded = _pad_input(incoming)
output_colors = incoming_padded.format(**color_codes)
# Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m'
groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent.
groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups]
groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes.
assert len(groups_compiled) == len(groups) # For testing.
output_colors_simplified = output_colors
for i in range(len(groups)):
output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i])
output_no_colors = _RE_SPLIT.sub('', output_colors_simplified)
# Strip any remaining color codes.
if _AutoCodes.DISABLE_COLORS:
output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified)
return output_colors_simplified, output_no_colors | [
"def",
"_parse_input",
"(",
"incoming",
")",
":",
"codes",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"_AutoCodes",
"(",
")",
".",
"items",
"(",
")",
"if",
"'{%s}'",
"%",
"k",
"in",
"incoming",
")",
"color_codes",
"=",
"dict",
"(",
"(",
"k",
",",
"''",
"if",
"_AutoCodes",
".",
"DISABLE_COLORS",
"else",
"'\\033[{0}m'",
".",
"format",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"codes",
".",
"items",
"(",
")",
")",
"incoming_padded",
"=",
"_pad_input",
"(",
"incoming",
")",
"output_colors",
"=",
"incoming_padded",
".",
"format",
"(",
"*",
"*",
"color_codes",
")",
"# Simplify: '{b}{red}' -> '\\033[1m\\033[31m' -> '\\033[1;31m'",
"groups",
"=",
"sorted",
"(",
"set",
"(",
"_RE_GROUP_SEARCH",
".",
"findall",
"(",
"output_colors",
")",
")",
",",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"# Get codes, grouped adjacent.",
"groups_simplified",
"=",
"[",
"[",
"x",
"for",
"n",
"in",
"_RE_NUMBER_SEARCH",
".",
"findall",
"(",
"i",
")",
"for",
"x",
"in",
"n",
".",
"split",
"(",
"';'",
")",
"]",
"for",
"i",
"in",
"groups",
"]",
"groups_compiled",
"=",
"[",
"'\\033[{0}m'",
".",
"format",
"(",
"';'",
".",
"join",
"(",
"g",
")",
")",
"for",
"g",
"in",
"groups_simplified",
"]",
"# Final codes.",
"assert",
"len",
"(",
"groups_compiled",
")",
"==",
"len",
"(",
"groups",
")",
"# For testing.",
"output_colors_simplified",
"=",
"output_colors",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"groups",
")",
")",
":",
"output_colors_simplified",
"=",
"output_colors_simplified",
".",
"replace",
"(",
"groups",
"[",
"i",
"]",
",",
"groups_compiled",
"[",
"i",
"]",
")",
"output_no_colors",
"=",
"_RE_SPLIT",
".",
"sub",
"(",
"''",
",",
"output_colors_simplified",
")",
"# Strip any remaining color codes.",
"if",
"_AutoCodes",
".",
"DISABLE_COLORS",
":",
"output_colors_simplified",
"=",
"_RE_NUMBER_SEARCH",
".",
"sub",
"(",
"''",
",",
"output_colors_simplified",
")",
"return",
"output_colors_simplified",
",",
"output_no_colors"
] | Performs the actual conversion of tags to ANSI escape codes.
Provides a version of the input without any colors for len() and other methods.
Positional arguments:
incoming -- the input unicode value.
Returns:
2-item tuple. First item is the parsed output. Second item is a version of the input without any colors. | [
"Performs",
"the",
"actual",
"conversion",
"of",
"tags",
"to",
"ANSI",
"escaped",
"codes",
"."
] | 0c9015a1a1f0a4a64d52945c86b45441d5871c56 | https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/tools/color.py#L237-L267 |
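The simplification step from the record above ('\033[1m\033[31m' -> '\033[1;31m') as a standalone sketch with its own regexes, since _RE_GROUP_SEARCH and _RE_NUMBER_SEARCH are not shown here:

import re

def merge_adjacent_ansi(text):
    # collapse runs of two or more adjacent ANSI codes into one sequence
    def _merge(match):
        numbers = re.findall(r'\033\[([\d;]+)m', match.group(0))
        return '\033[%sm' % ';'.join(numbers)
    return re.sub(r'(?:\033\[[\d;]+m){2,}', _merge, text)

print(repr(merge_adjacent_ansi('\033[1m\033[31mbold red')))
# '\x1b[1;31mbold red'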
248,784 | hkff/FodtlMon | fodtlmon/tools/color.py | list_tags | def list_tags():
"""Lists the available tags.
Returns:
Tuple of tuples. Child tuples are four items: ('opening tag', 'closing tag', main ansi value, closing ansi value).
"""
codes = _AutoCodes()
grouped = set([(k, '/{0}'.format(k), codes[k], codes['/{0}'.format(k)]) for k in codes if not k.startswith('/')])
# Add half-tags like /all.
found = [c for r in grouped for c in r[:2]]
missing = set([('', r[0], None, r[1]) if r[0].startswith('/') else (r[0], '', r[1], None)
for r in _AutoCodes().items() if r[0] not in found])
grouped |= missing
# Sort.
payload = sorted([i for i in grouped if i[2] is None], key=lambda x: x[3]) # /all /fg /bg
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[2] < 10], key=lambda x: x[2])) # b i u flash
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[0].startswith('auto')], key=lambda x: x[2])) # auto colors
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if not i[0].startswith('hi')], key=lambda x: x[2])) # dark colors
grouped -= set(payload)
payload.extend(sorted(grouped, key=lambda x: x[2])) # light colors
return tuple(payload) | python | def list_tags():
"""Lists the available tags.
Returns:
Tuple of tuples. Child tuples are four items: ('opening tag', 'closing tag', main ansi value, closing ansi value).
"""
codes = _AutoCodes()
grouped = set([(k, '/{0}'.format(k), codes[k], codes['/{0}'.format(k)]) for k in codes if not k.startswith('/')])
# Add half-tags like /all.
found = [c for r in grouped for c in r[:2]]
missing = set([('', r[0], None, r[1]) if r[0].startswith('/') else (r[0], '', r[1], None)
for r in _AutoCodes().items() if r[0] not in found])
grouped |= missing
# Sort.
payload = sorted([i for i in grouped if i[2] is None], key=lambda x: x[3]) # /all /fg /bg
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[2] < 10], key=lambda x: x[2])) # b i u flash
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[0].startswith('auto')], key=lambda x: x[2])) # auto colors
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if not i[0].startswith('hi')], key=lambda x: x[2])) # dark colors
grouped -= set(payload)
payload.extend(sorted(grouped, key=lambda x: x[2])) # light colors
return tuple(payload) | [
"def",
"list_tags",
"(",
")",
":",
"codes",
"=",
"_AutoCodes",
"(",
")",
"grouped",
"=",
"set",
"(",
"[",
"(",
"k",
",",
"'/{0}'",
".",
"format",
"(",
"k",
")",
",",
"codes",
"[",
"k",
"]",
",",
"codes",
"[",
"'/{0}'",
".",
"format",
"(",
"k",
")",
"]",
")",
"for",
"k",
"in",
"codes",
"if",
"not",
"k",
".",
"startswith",
"(",
"'/'",
")",
"]",
")",
"# Add half-tags like /all.",
"found",
"=",
"[",
"c",
"for",
"r",
"in",
"grouped",
"for",
"c",
"in",
"r",
"[",
":",
"2",
"]",
"]",
"missing",
"=",
"set",
"(",
"[",
"(",
"''",
",",
"r",
"[",
"0",
"]",
",",
"None",
",",
"r",
"[",
"1",
"]",
")",
"if",
"r",
"[",
"0",
"]",
".",
"startswith",
"(",
"'/'",
")",
"else",
"(",
"r",
"[",
"0",
"]",
",",
"''",
",",
"r",
"[",
"1",
"]",
",",
"None",
")",
"for",
"r",
"in",
"_AutoCodes",
"(",
")",
".",
"items",
"(",
")",
"if",
"r",
"[",
"0",
"]",
"not",
"in",
"found",
"]",
")",
"grouped",
"|=",
"missing",
"# Sort.",
"payload",
"=",
"sorted",
"(",
"[",
"i",
"for",
"i",
"in",
"grouped",
"if",
"i",
"[",
"2",
"]",
"is",
"None",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"3",
"]",
")",
"# /all /fg /bg",
"grouped",
"-=",
"set",
"(",
"payload",
")",
"payload",
".",
"extend",
"(",
"sorted",
"(",
"[",
"i",
"for",
"i",
"in",
"grouped",
"if",
"i",
"[",
"2",
"]",
"<",
"10",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
")",
")",
"# b i u flash",
"grouped",
"-=",
"set",
"(",
"payload",
")",
"payload",
".",
"extend",
"(",
"sorted",
"(",
"[",
"i",
"for",
"i",
"in",
"grouped",
"if",
"i",
"[",
"0",
"]",
".",
"startswith",
"(",
"'auto'",
")",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
")",
")",
"# auto colors",
"grouped",
"-=",
"set",
"(",
"payload",
")",
"payload",
".",
"extend",
"(",
"sorted",
"(",
"[",
"i",
"for",
"i",
"in",
"grouped",
"if",
"not",
"i",
"[",
"0",
"]",
".",
"startswith",
"(",
"'hi'",
")",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
")",
")",
"# dark colors",
"grouped",
"-=",
"set",
"(",
"payload",
")",
"payload",
".",
"extend",
"(",
"sorted",
"(",
"grouped",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
")",
")",
"# light colors",
"return",
"tuple",
"(",
"payload",
")"
] | Lists the available tags.
Returns:
Tuple of tuples. Child tuples are four items: ('opening tag', 'closing tag', main ansi value, closing ansi value). | [
"Lists",
"the",
"available",
"tags",
"."
] | 0c9015a1a1f0a4a64d52945c86b45441d5871c56 | https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/tools/color.py#L287-L312 |
248,785 | hkff/FodtlMon | fodtlmon/tools/color.py | _WindowsStream._set_color | def _set_color(self, color_code):
"""Changes the foreground and background colors for subsequently printed characters.
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
Positional arguments:
color_code -- integer color code from _WINDOWS_CODES.
"""
# Get current color code.
current_fg, current_bg = self._get_colors()
# Handle special negative codes. Also determine the final color code.
if color_code == -39:
final_color_code = self.default_fg | current_bg # Reset the foreground only.
elif color_code == -49:
final_color_code = current_fg | self.default_bg # Reset the background only.
elif color_code == -33:
final_color_code = self.default_fg | self.default_bg # Reset both.
elif color_code == -8:
final_color_code = current_fg # Black background.
else:
new_is_bg = color_code in self.ALL_BG_CODES
final_color_code = color_code | (current_fg if new_is_bg else current_bg)
# Set new code.
_WindowsCSBI.WINDLL.kernel32.SetConsoleTextAttribute(self.win32_stream_handle, final_color_code) | python | def _set_color(self, color_code):
"""Changes the foreground and background colors for subsequently printed characters.
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
Positional arguments:
color_code -- integer color code from _WINDOWS_CODES.
"""
# Get current color code.
current_fg, current_bg = self._get_colors()
# Handle special negative codes. Also determine the final color code.
if color_code == -39:
final_color_code = self.default_fg | current_bg # Reset the foreground only.
elif color_code == -49:
final_color_code = current_fg | self.default_bg # Reset the background only.
elif color_code == -33:
final_color_code = self.default_fg | self.default_bg # Reset both.
elif color_code == -8:
final_color_code = current_fg # Black background.
else:
new_is_bg = color_code in self.ALL_BG_CODES
final_color_code = color_code | (current_fg if new_is_bg else current_bg)
# Set new code.
_WindowsCSBI.WINDLL.kernel32.SetConsoleTextAttribute(self.win32_stream_handle, final_color_code) | [
"def",
"_set_color",
"(",
"self",
",",
"color_code",
")",
":",
"# Get current color code.",
"current_fg",
",",
"current_bg",
"=",
"self",
".",
"_get_colors",
"(",
")",
"# Handle special negative codes. Also determine the final color code.",
"if",
"color_code",
"==",
"-",
"39",
":",
"final_color_code",
"=",
"self",
".",
"default_fg",
"|",
"current_bg",
"# Reset the foreground only.",
"elif",
"color_code",
"==",
"-",
"49",
":",
"final_color_code",
"=",
"current_fg",
"|",
"self",
".",
"default_bg",
"# Reset the background only.",
"elif",
"color_code",
"==",
"-",
"33",
":",
"final_color_code",
"=",
"self",
".",
"default_fg",
"|",
"self",
".",
"default_bg",
"# Reset both.",
"elif",
"color_code",
"==",
"-",
"8",
":",
"final_color_code",
"=",
"current_fg",
"# Black background.",
"else",
":",
"new_is_bg",
"=",
"color_code",
"in",
"self",
".",
"ALL_BG_CODES",
"final_color_code",
"=",
"color_code",
"|",
"(",
"current_fg",
"if",
"new_is_bg",
"else",
"current_bg",
")",
"# Set new code.",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"SetConsoleTextAttribute",
"(",
"self",
".",
"win32_stream_handle",
",",
"final_color_code",
")"
] | Changes the foreground and background colors for subsequently printed characters.
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
Positional arguments:
color_code -- integer color code from _WINDOWS_CODES. | [
"Changes",
"the",
"foreground",
"and",
"background",
"colors",
"for",
"subsequently",
"printed",
"characters",
"."
] | 0c9015a1a1f0a4a64d52945c86b45441d5871c56 | https://github.com/hkff/FodtlMon/blob/0c9015a1a1f0a4a64d52945c86b45441d5871c56/fodtlmon/tools/color.py#L705-L738 |
248,786 | af/turrentine | turrentine/views.py | PageView.get_mimetype | def get_mimetype(self):
"""
Use the ending of the template name to infer the response's Content-Type header.
"""
template_name = self.get_template_names()[0]
for extension, mimetype in turrentine_settings.TURRENTINE_MIMETYPE_EXTENSIONS:
if template_name.endswith(extension):
return mimetype
return 'text/html' | python | def get_mimetype(self):
"""
Use the ending of the template name to infer the response's Content-Type header.
"""
template_name = self.get_template_names()[0]
for extension, mimetype in turrentine_settings.TURRENTINE_MIMETYPE_EXTENSIONS:
if template_name.endswith(extension):
return mimetype
return 'text/html' | [
"def",
"get_mimetype",
"(",
"self",
")",
":",
"template_name",
"=",
"self",
".",
"get_template_names",
"(",
")",
"[",
"0",
"]",
"for",
"extension",
",",
"mimetype",
"in",
"turrentine_settings",
".",
"TURRENTINE_MIMETYPE_EXTENSIONS",
":",
"if",
"template_name",
".",
"endswith",
"(",
"extension",
")",
":",
"return",
"mimetype",
"return",
"'text/html'"
] | Use the ending of the template name to infer the response's Content-Type header. | [
"Use",
"the",
"ending",
"of",
"the",
"template",
"name",
"to",
"infer",
"response",
"s",
"Content",
"-",
"Type",
"header",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/views.py#L37-L45 |
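The extension-to-mimetype inference above, with an assumed mapping in place of turrentine's TURRENTINE_MIMETYPE_EXTENSIONS setting:

MIMETYPE_EXTENSIONS = (       # assumed example values
    ('.js', 'application/javascript'),
    ('.css', 'text/css'),
    ('.txt', 'text/plain'),
)

def guess_mimetype(template_name):
    for extension, mimetype in MIMETYPE_EXTENSIONS:
        if template_name.endswith(extension):
            return mimetype
    return 'text/html'  # default, as in the record

print(guess_mimetype('robots.txt'))  # text/plain
print(guess_mimetype('about.html'))  # text/html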
248,787 | af/turrentine | turrentine/views.py | PageView.get | def get(self, request, *args, **kwargs):
"""
Check user authentication if the page requires a login.
We could do this by overriding dispatch() instead, but we assume
that only GET requests will be required by the CMS pages.
"""
try:
page = self.object = self.get_object()
except Http404:
# If APPEND_SLASH is set and our url has no trailing slash,
# look for a CMS page at the alternate url:
if settings.APPEND_SLASH and not self.kwargs.get('path', '/').endswith('/'):
return self._try_url_with_appended_slash()
else:
raise Http404
# Check request.user's credentials in accessing this page:
if page.staff_only and not request.user.is_staff:
# Block out non-staff users on restricted pages.
# Django 1.4 will introduce better HTTP 403 support, but until then
# we'll just render a plain "permission denied" template (which can be overridden):
return render(request, 'turrentine/403.html', status=403)
if page.login_required and request.user.is_anonymous():
redirect_url = '%s?next=%s' % (settings.LOGIN_URL, self.kwargs.get('path', ''))
return HttpResponseRedirect(redirect_url)
else:
self.object = self._mark_html_fields_as_safe(self.object)
context = self.get_context_data(object=self.object)
return self.render_to_response(context, content_type=self.get_mimetype()) | python | def get(self, request, *args, **kwargs):
"""
Check user authentication if the page requires a login.
We could do this by overriding dispatch() instead, but we assume
that only GET requests will be required by the CMS pages.
"""
try:
page = self.object = self.get_object()
except Http404:
# If APPEND_SLASH is set and our url has no trailing slash,
# look for a CMS page at the alternate url:
if settings.APPEND_SLASH and not self.kwargs.get('path', '/').endswith('/'):
return self._try_url_with_appended_slash()
else:
raise Http404
# Check request.user's credentials in accessing this page:
if page.staff_only and not request.user.is_staff:
# Block out non-staff users on restricted pages.
# Django 1.4 will introduce better HTTP 403 support, but until then
# we'll just render a plain "permission denied" template (which can be overridden):
return render(request, 'turrentine/403.html', status=403)
if page.login_required and request.user.is_anonymous():
redirect_url = '%s?next=%s' % (settings.LOGIN_URL, self.kwargs.get('path', ''))
return HttpResponseRedirect(redirect_url)
else:
self.object = self._mark_html_fields_as_safe(self.object)
context = self.get_context_data(object=self.object)
return self.render_to_response(context, content_type=self.get_mimetype()) | [
"def",
"get",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"page",
"=",
"self",
".",
"object",
"=",
"self",
".",
"get_object",
"(",
")",
"except",
"Http404",
":",
"# If APPEND_SLASH is set and our url has no trailing slash,",
"# look for a CMS page at the alternate url:",
"if",
"settings",
".",
"APPEND_SLASH",
"and",
"not",
"self",
".",
"kwargs",
".",
"get",
"(",
"'path'",
",",
"'/'",
")",
".",
"endswith",
"(",
"'/'",
")",
":",
"return",
"self",
".",
"_try_url_with_appended_slash",
"(",
")",
"else",
":",
"raise",
"Http404",
"# Check request.user's credentials in accessing this page:",
"if",
"page",
".",
"staff_only",
"and",
"not",
"request",
".",
"user",
".",
"is_staff",
":",
"# Block out non-staff users on restricted pages.",
"# Django 1.4 will introduce better HTTP 403 support, but until then",
"# we'll just render a plain \"permission denied\" template (which can be overridden):",
"return",
"render",
"(",
"request",
",",
"'turrentine/403.html'",
",",
"status",
"=",
"403",
")",
"if",
"page",
".",
"login_required",
"and",
"request",
".",
"user",
".",
"is_anonymous",
"(",
")",
":",
"redirect_url",
"=",
"'%s?next=%s'",
"%",
"(",
"settings",
".",
"LOGIN_URL",
",",
"self",
".",
"kwargs",
".",
"get",
"(",
"'path'",
",",
"''",
")",
")",
"return",
"HttpResponseRedirect",
"(",
"redirect_url",
")",
"else",
":",
"self",
".",
"object",
"=",
"self",
".",
"_mark_html_fields_as_safe",
"(",
"self",
".",
"object",
")",
"context",
"=",
"self",
".",
"get_context_data",
"(",
"object",
"=",
"self",
".",
"object",
")",
"return",
"self",
".",
"render_to_response",
"(",
"context",
",",
"content_type",
"=",
"self",
".",
"get_mimetype",
"(",
")",
")"
] | Check user authentication if the page requires a login.
We could do this by overriding dispatch() instead, but we assume
that only GET requests will be required by the CMS pages. | [
"Check",
"user",
"authentication",
"if",
"the",
"page",
"requires",
"a",
"login",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/views.py#L47-L76 |
248,788 | af/turrentine | turrentine/views.py | PageView._try_url_with_appended_slash | def _try_url_with_appended_slash(self):
"""
Try our URL with an appended slash. If a CMS page is found at that URL, redirect to it.
If no page is found at that URL, raise Http404.
"""
new_url_to_try = self.kwargs.get('path', '') + '/'
if not new_url_to_try.startswith('/'):
new_url_to_try = '/' + new_url_to_try
if CMSPage.objects.published().filter(url=new_url_to_try).exists():
return HttpResponsePermanentRedirect(new_url_to_try)
else:
raise Http404 | python | def _try_url_with_appended_slash(self):
"""
Try our URL with an appended slash. If a CMS page is found at that URL, redirect to it.
If no page is found at that URL, raise Http404.
"""
new_url_to_try = self.kwargs.get('path', '') + '/'
if not new_url_to_try.startswith('/'):
new_url_to_try = '/' + new_url_to_try
if CMSPage.objects.published().filter(url=new_url_to_try).exists():
return HttpResponsePermanentRedirect(new_url_to_try)
else:
raise Http404 | [
"def",
"_try_url_with_appended_slash",
"(",
"self",
")",
":",
"new_url_to_try",
"=",
"self",
".",
"kwargs",
".",
"get",
"(",
"'path'",
",",
"''",
")",
"+",
"'/'",
"if",
"not",
"new_url_to_try",
".",
"startswith",
"(",
"'/'",
")",
":",
"new_url_to_try",
"=",
"'/'",
"+",
"new_url_to_try",
"if",
"CMSPage",
".",
"objects",
".",
"published",
"(",
")",
".",
"filter",
"(",
"url",
"=",
"new_url_to_try",
")",
".",
"exists",
"(",
")",
":",
"return",
"HttpResponsePermanentRedirect",
"(",
"new_url_to_try",
")",
"else",
":",
"raise",
"Http404"
] | Try our URL with an appended slash. If a CMS page is found at that URL, redirect to it.
If no page is found at that URL, raise Http404. | [
"Try",
"our",
"URL",
"with",
"an",
"appended",
"slash",
".",
"If",
"a",
"CMS",
"page",
"is",
"found",
"at",
"that",
"URL",
"redirect",
"to",
"it",
".",
"If",
"no",
"page",
"is",
"found",
"at",
"that",
"URL",
"raise",
"Http404",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/views.py#L78-L89 |
248,789 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.validate_rule_name | def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | python | def validate_rule_name(self, name):
"""
Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid.
"""
if not name:
raise SerializerError("Rule name is empty".format(name))
if name[0] not in RULE_ALLOWED_START:
msg = "Rule name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in RULE_ALLOWED_CHARS:
msg = ("Invalid rule name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | [
"def",
"validate_rule_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
":",
"raise",
"SerializerError",
"(",
"\"Rule name is empty\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"name",
"[",
"0",
"]",
"not",
"in",
"RULE_ALLOWED_START",
":",
"msg",
"=",
"\"Rule name '{}' must starts with a letter\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"for",
"item",
"in",
"name",
":",
"if",
"item",
"not",
"in",
"RULE_ALLOWED_CHARS",
":",
"msg",
"=",
"(",
"\"Invalid rule name '{}': it must only contains \"",
"\"letters, numbers and '_' character\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"return",
"True"
] | Validate rule name.
Arguments:
name (string): Rule name.
Returns:
bool: ``True`` if rule name is valid. | [
"Validate",
"rule",
"name",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L46-L69 |
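The validation contract above as a predicate, with assumed character sets standing in for the module's RULE_ALLOWED_START / RULE_ALLOWED_CHARS constants:

import string

ALLOWED_START = string.ascii_letters
ALLOWED_CHARS = string.ascii_letters + string.digits + '_'

def is_valid_rule_name(name):
    return (bool(name)
            and name[0] in ALLOWED_START
            and all(c in ALLOWED_CHARS for c in name))

print(is_valid_rule_name('color_scheme_1'))  # True
print(is_valid_rule_name('2colors'))         # False: must start with a letter
print(is_valid_rule_name('color-scheme'))    # False: '-' is not allowed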
248,790 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.validate_variable_name | def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Variable name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | python | def validate_variable_name(self, name):
"""
Validate variable name.
Arguments:
name (string): Variable name.
Returns:
bool: ``True`` if variable name is valid.
"""
if not name:
raise SerializerError("Variable name is empty".format(name))
if name[0] not in PROPERTY_ALLOWED_START:
msg = "Variable name '{}' must starts with a letter"
raise SerializerError(msg.format(name))
for item in name:
if item not in PROPERTY_ALLOWED_CHARS:
msg = ("Invalid variable name '{}': it must only contains "
"letters, numbers and '_' character")
raise SerializerError(msg.format(name))
return True | [
"def",
"validate_variable_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"name",
":",
"raise",
"SerializerError",
"(",
"\"Variable name is empty\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"name",
"[",
"0",
"]",
"not",
"in",
"PROPERTY_ALLOWED_START",
":",
"msg",
"=",
"\"Variable name '{}' must starts with a letter\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"for",
"item",
"in",
"name",
":",
"if",
"item",
"not",
"in",
"PROPERTY_ALLOWED_CHARS",
":",
"msg",
"=",
"(",
"\"Invalid variable name '{}': it must only contains \"",
"\"letters, numbers and '_' character\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"return",
"True"
] | Validate variable name.
Arguments:
name (string): Variable name.
Returns:
bool: ``True`` if variable name is valid. | [
"Validate",
"variable",
"name",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L71-L94 |
248,791 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.value_splitter | def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list of items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items | python | def value_splitter(self, reference, prop, value, mode):
"""
Split a string into a list of items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitter are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list:
"""
items = []
if mode == 'json-list':
try:
items = json.loads(value)
except json.JSONDecodeError as e:
msg = ("Reference '{ref}' raised JSON decoder error when "
"splitting values from '{prop}': {err}'")
raise SerializerError(msg.format(ref=reference, prop=prop,
err=e))
else:
if len(value) > 0:
items = value.split(" ")
return items | [
"def",
"value_splitter",
"(",
"self",
",",
"reference",
",",
"prop",
",",
"value",
",",
"mode",
")",
":",
"items",
"=",
"[",
"]",
"if",
"mode",
"==",
"'json-list'",
":",
"try",
":",
"items",
"=",
"json",
".",
"loads",
"(",
"value",
")",
"except",
"json",
".",
"JSONDecodeError",
"as",
"e",
":",
"print",
"(",
"value",
")",
"msg",
"=",
"(",
"\"Reference '{ref}' raised JSON decoder error when \"",
"\"splitting values from '{prop}': {err}'\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"ref",
"=",
"reference",
",",
"prop",
"=",
"prop",
",",
"err",
"=",
"e",
")",
")",
"else",
":",
"if",
"len",
"(",
"value",
")",
">",
"0",
":",
"items",
"=",
"value",
".",
"split",
"(",
"\" \"",
")",
"return",
"items"
] | Split a string into a list of items.
Default behavior is to split on white spaces.
Arguments:
reference (string): Reference name used when raising possible
error.
prop (string): Property name used when raising possible error.
value (string): Property value to split.
mode (string): Splitter mode. Default should come from
``ManifestSerializer._DEFAULT_SPLITTER``.
Available splitters are:
* ``white-space``: Simply split a string on white spaces;
* ``json-list``: Assume the string is a JSON list to parse;
Returns:
list: List of split values. | [
"Split",
"a",
"string",
"into",
"a",
"list",
"items",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L96-L134 |
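The two splitter modes above side by side, minus the error wrapping:

import json

def split_value(value, mode='white-space'):
    if mode == 'json-list':
        return json.loads(value)          # value must be a JSON array
    return value.split(' ') if value else []

print(split_value('small medium large'))              # ['small', 'medium', 'large']
print(split_value('["10px", "20 px"]', 'json-list'))  # ['10px', '20 px']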
248,792 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_json | def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending from JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content | python | def serialize_to_json(self, name, datas):
"""
Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending on JSON content.
"""
data_object = datas.get('object', None)
if data_object is None:
msg = ("JSON reference '{}' lacks of required 'object' variable")
raise SerializerError(msg.format(name))
try:
content = json.loads(data_object, object_pairs_hook=OrderedDict)
except json.JSONDecodeError as e:
msg = "JSON reference '{}' raised error from JSON decoder: {}"
raise SerializerError(msg.format(name, e))
else:
return content | [
"def",
"serialize_to_json",
"(",
"self",
",",
"name",
",",
"datas",
")",
":",
"data_object",
"=",
"datas",
".",
"get",
"(",
"'object'",
",",
"None",
")",
"if",
"data_object",
"is",
"None",
":",
"msg",
"=",
"(",
"\"JSON reference '{}' lacks of required 'object' variable\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"try",
":",
"content",
"=",
"json",
".",
"loads",
"(",
"data_object",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"except",
"json",
".",
"JSONDecodeError",
"as",
"e",
":",
"msg",
"=",
"\"JSON reference '{}' raised error from JSON decoder: {}\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
",",
"e",
")",
")",
"else",
":",
"return",
"content"
] | Serialize given datas to any object from assumed JSON string.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
object: Object depending on JSON content. | [
"Serialize",
"given",
"datas",
"to",
"any",
"object",
"from",
"assumed",
"JSON",
"string",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L136-L159 |
248,793 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_list | def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only requires a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items | python | def serialize_to_list(self, name, datas):
"""
Serialize given datas to a list structure.
List structure is very simple and only requires a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas.
"""
items = datas.get('items', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if items is None:
msg = ("List reference '{}' lacks of required 'items' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
items = self.value_splitter(name, 'items', items, mode=splitter)
return items | [
"def",
"serialize_to_list",
"(",
"self",
",",
"name",
",",
"datas",
")",
":",
"items",
"=",
"datas",
".",
"get",
"(",
"'items'",
",",
"None",
")",
"splitter",
"=",
"datas",
".",
"get",
"(",
"'splitter'",
",",
"self",
".",
"_DEFAULT_SPLITTER",
")",
"if",
"items",
"is",
"None",
":",
"msg",
"=",
"(",
"\"List reference '{}' lacks of required 'items' variable \"",
"\"or is empty\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"items",
"=",
"self",
".",
"value_splitter",
"(",
"name",
",",
"'items'",
",",
"items",
",",
"mode",
"=",
"splitter",
")",
"return",
"items"
] | Serialize given datas to a list structure.
List structure is very simple and only requires a variable ``--items``
which is a string of values separated with an empty space. Every other
properties are ignored.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
list: List of serialized reference datas. | [
"Serialize",
"given",
"datas",
"to",
"a",
"list",
"structure",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L249-L274 |
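An end-to-end sketch of the list reference above: a parsed rule carrying an "items" variable becomes a Python list via the chosen splitter (error types simplified):

import json

def serialize_to_list(datas):
    items = datas.get('items')
    if items is None:
        raise ValueError("missing required 'items' variable")
    if datas.get('splitter') == 'json-list':
        return json.loads(items)
    return items.split(' ')

print(serialize_to_list({'items': 'black white gray'}))
# ['black', 'white', 'gray']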
248,794 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize_to_string | def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from the required variable ``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value | python | def serialize_to_string(self, name, datas):
"""
Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value.
"""
value = datas.get('value', None)
if value is None:
msg = ("String reference '{}' lacks of required 'value' variable "
"or is empty")
raise SerializerError(msg.format(name))
return value | [
"def",
"serialize_to_string",
"(",
"self",
",",
"name",
",",
"datas",
")",
":",
"value",
"=",
"datas",
".",
"get",
"(",
"'value'",
",",
"None",
")",
"if",
"value",
"is",
"None",
":",
"msg",
"=",
"(",
"\"String reference '{}' lacks of required 'value' variable \"",
"\"or is empty\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"return",
"value"
] | Serialize given datas to a string.
Simply return the value from required variable``value``.
Arguments:
name (string): Name only used inside possible exception message.
datas (dict): Datas to serialize.
Returns:
string: Value. | [
"Serialize",
"given",
"datas",
"to",
"a",
"string",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L276-L296 |
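The string structure is the simplest mode: hand back the 'value' variable, or raise when it is absent. A usage sketch under the same no-argument-constructor assumption as above:

from py_css_styleguide.serializer import ManifestSerializer

serializer = ManifestSerializer()
print(serializer.serialize_to_string('brand', {'value': '#ff0000'}))
# -> '#ff0000'

try:
    serializer.serialize_to_string('brand', {})
except Exception as error:
    print(error)  # SerializerError about the missing 'value' variable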
248,795 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_meta_references | def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names | python | def get_meta_references(self, datas):
"""
Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names.
"""
rule = datas.get(RULE_META_REFERENCES, {})
if not rule:
msg = "Manifest lacks of '.{}' or is empty"
raise SerializerError(msg.format(RULE_META_REFERENCES))
else:
if rule.get('names', None):
names = rule.get('names').split(" ")
elif rule.get('auto', None):
names = self.get_available_references(datas)
else:
msg = ("'.{}' either require '--names' or '--auto' variable "
"to be defined")
raise SerializerError(msg.format(RULE_META_REFERENCES))
for item in names:
self.validate_rule_name(item)
return names | [
"def",
"get_meta_references",
"(",
"self",
",",
"datas",
")",
":",
"rule",
"=",
"datas",
".",
"get",
"(",
"RULE_META_REFERENCES",
",",
"{",
"}",
")",
"if",
"not",
"rule",
":",
"msg",
"=",
"\"Manifest lacks of '.{}' or is empty\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"RULE_META_REFERENCES",
")",
")",
"else",
":",
"if",
"rule",
".",
"get",
"(",
"'names'",
",",
"None",
")",
":",
"names",
"=",
"rule",
".",
"get",
"(",
"'names'",
")",
".",
"split",
"(",
"\" \"",
")",
"elif",
"rule",
".",
"get",
"(",
"'auto'",
",",
"None",
")",
":",
"names",
"=",
"self",
".",
"get_available_references",
"(",
"datas",
")",
"else",
":",
"msg",
"=",
"(",
"\"'.{}' either require '--names' or '--auto' variable \"",
"\"to be defined\"",
")",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"RULE_META_REFERENCES",
")",
")",
"for",
"item",
"in",
"names",
":",
"self",
".",
"validate_rule_name",
"(",
"item",
")",
"return",
"names"
] | Get manifest enabled references declaration
This required declaration is readed from
``styleguide-metas-references`` rule that require either a ``--names``
or ``--auto`` variable, each one define the mode to enable reference:
Manually
Using ``--names`` which define a list of names to enable, every
other non enabled rule will be ignored.
Section name (and so Reference name also) must not contains special
character nor ``-`` so they still be valid variable name for almost
any languages. For word separator inside name, use ``_``.
Automatic
Using ``--auto`` variable every reference rules will be enabled.
The value of this variable is not important since it is not empty.
If both of these variables are defined, the manual enable mode is used.
Arguments:
datas (dict): Data where to search for meta references declaration.
This is commonly the fully parsed manifest.
Returns:
list: A list of reference names. | [
"Get",
"manifest",
"enabled",
"references",
"declaration"
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L298-L344 |
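The two enabling modes documented above condense into a small standalone sketch. The rule prefixes come from the record itself ('styleguide-metas-references' and 'styleguide-reference'); the shape of the parsed-manifest dict is assumed from how the surrounding methods consume it.

RULE_META = 'styleguide-metas-references'
RULE_REF = 'styleguide-reference'

def enabled_names_sketch(parsed):
    metas = parsed.get(RULE_META, {})
    if metas.get('names'):
        # Manual mode wins when both variables are defined.
        return metas['names'].split(' ')
    if metas.get('auto'):
        # Any non-empty value enables automatic mode: take every rule
        # carrying the reference prefix.
        return [k[len(RULE_REF) + 1:] for k in parsed if k.startswith(RULE_REF)]
    raise ValueError("'{}' requires either 'names' or 'auto'".format(RULE_META))

parsed = {
    RULE_META: {'names': 'palette typography'},
    RULE_REF + '-palette': {},
    RULE_REF + '-typography': {},
    RULE_REF + '-ignored': {},
}
print(enabled_names_sketch(parsed))  # -> ['palette', 'typography']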
248,796 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_reference | def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context | python | def get_reference(self, datas, name):
"""
Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas.
"""
rule_name = '-'.join((RULE_REFERENCE, name))
structure_mode = 'nested'
if rule_name not in datas:
msg = "Unable to find enabled reference '{}'"
raise SerializerError(msg.format(name))
properties = datas.get(rule_name)
# Search for "structure" variable
if 'structure' in properties:
if properties['structure'] == 'flat':
structure_mode = 'flat'
elif properties['structure'] == 'list':
structure_mode = 'list'
elif properties['structure'] == 'string':
structure_mode = 'string'
elif properties['structure'] == 'json':
structure_mode = 'json'
elif properties['structure'] == 'nested':
pass
else:
msg = "Invalid structure mode name '{}' for reference '{}'"
raise SerializerError(msg.format(structure_mode, name))
del properties['structure']
# Validate variable names
for item in properties.keys():
self.validate_variable_name(item)
# Perform serialize according to structure mode
if structure_mode == 'flat':
context = self.serialize_to_flat(name, properties)
elif structure_mode == 'list':
context = self.serialize_to_list(name, properties)
elif structure_mode == 'string':
context = self.serialize_to_string(name, properties)
elif structure_mode == 'nested':
context = self.serialize_to_nested(name, properties)
elif structure_mode == 'json':
context = self.serialize_to_json(name, properties)
return context | [
"def",
"get_reference",
"(",
"self",
",",
"datas",
",",
"name",
")",
":",
"rule_name",
"=",
"'-'",
".",
"join",
"(",
"(",
"RULE_REFERENCE",
",",
"name",
")",
")",
"structure_mode",
"=",
"'nested'",
"if",
"rule_name",
"not",
"in",
"datas",
":",
"msg",
"=",
"\"Unable to find enabled reference '{}'\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"name",
")",
")",
"properties",
"=",
"datas",
".",
"get",
"(",
"rule_name",
")",
"# Search for \"structure\" variable",
"if",
"'structure'",
"in",
"properties",
":",
"if",
"properties",
"[",
"'structure'",
"]",
"==",
"'flat'",
":",
"structure_mode",
"=",
"'flat'",
"elif",
"properties",
"[",
"'structure'",
"]",
"==",
"'list'",
":",
"structure_mode",
"=",
"'list'",
"elif",
"properties",
"[",
"'structure'",
"]",
"==",
"'string'",
":",
"structure_mode",
"=",
"'string'",
"elif",
"properties",
"[",
"'structure'",
"]",
"==",
"'json'",
":",
"structure_mode",
"=",
"'json'",
"elif",
"properties",
"[",
"'structure'",
"]",
"==",
"'nested'",
":",
"pass",
"else",
":",
"msg",
"=",
"\"Invalid structure mode name '{}' for reference '{}'\"",
"raise",
"SerializerError",
"(",
"msg",
".",
"format",
"(",
"structure_mode",
",",
"name",
")",
")",
"del",
"properties",
"[",
"'structure'",
"]",
"# Validate variable names",
"for",
"item",
"in",
"properties",
".",
"keys",
"(",
")",
":",
"self",
".",
"validate_variable_name",
"(",
"item",
")",
"# Perform serialize according to structure mode",
"if",
"structure_mode",
"==",
"'flat'",
":",
"context",
"=",
"self",
".",
"serialize_to_flat",
"(",
"name",
",",
"properties",
")",
"elif",
"structure_mode",
"==",
"'list'",
":",
"context",
"=",
"self",
".",
"serialize_to_list",
"(",
"name",
",",
"properties",
")",
"elif",
"structure_mode",
"==",
"'string'",
":",
"context",
"=",
"self",
".",
"serialize_to_string",
"(",
"name",
",",
"properties",
")",
"elif",
"structure_mode",
"==",
"'nested'",
":",
"context",
"=",
"self",
".",
"serialize_to_nested",
"(",
"name",
",",
"properties",
")",
"elif",
"structure_mode",
"==",
"'json'",
":",
"context",
"=",
"self",
".",
"serialize_to_json",
"(",
"name",
",",
"properties",
")",
"return",
"context"
] | Get serialized reference datas
Because every reference is turned to a dict (that stands on ``keys``
variable that is a list of key names), every variables must have the
same exact length of word than the key name list.
A reference name starts with 'styleguide-reference-' followed by
name for reference.
A reference can contains variable ``--structure`` setted to ``"flat"``,
``"list"`` or ``"string"`` to define serialization structure.
Arguments:
datas (dict): Data where to search for reference declaration. This
is commonly the fully parsed manifest.
name (string): Reference name to get and serialize.
Returns:
collections.OrderedDict: Serialized reference datas. | [
"Get",
"serialized",
"reference",
"datas"
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L346-L410 |
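The structure selection above is a plain if/elif chain mapping a mode name to a serialize_to_* method. A table-driven sketch of the same dispatch, using the method names shown in the record and working on any ManifestSerializer instance:

STRUCTURE_METHODS = {
    'flat': 'serialize_to_flat',
    'list': 'serialize_to_list',
    'string': 'serialize_to_string',
    'nested': 'serialize_to_nested',
    'json': 'serialize_to_json',
}

def dispatch_sketch(serializer, name, properties):
    # 'nested' is the default mode, matching the record above.
    mode = properties.pop('structure', 'nested')
    if mode not in STRUCTURE_METHODS:
        raise ValueError(
            "Invalid structure mode name '{}' for reference '{}'".format(mode, name)
        )
    return getattr(serializer, STRUCTURE_METHODS[mode])(name, properties)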
248,797 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_available_references | def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names | python | def get_available_references(self, datas):
"""
Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed.
"""
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names | [
"def",
"get_available_references",
"(",
"self",
",",
"datas",
")",
":",
"names",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"datas",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"RULE_REFERENCE",
")",
":",
"names",
".",
"append",
"(",
"k",
"[",
"len",
"(",
"RULE_REFERENCE",
")",
"+",
"1",
":",
"]",
")",
"return",
"names"
] | Get available manifest reference names.
Every rules starting with prefix from ``nomenclature.RULE_REFERENCE``
are available references.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of every available reference names. This is the real
name unprefixed. | [
"Get",
"available",
"manifest",
"reference",
"names",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L412-L434 |
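The availability scan reduces to a prefix filter over the parsed manifest keys; the len(RULE_REFERENCE) + 1 slice strips both the prefix and the hyphen joining it to the reference name. A standalone sketch:

PREFIX = 'styleguide-reference'
parsed = {
    'styleguide-reference-palette': {},
    'styleguide-reference-spacing': {},
    'styleguide-metas-references': {'auto': 'true'},
}
print([k[len(PREFIX) + 1:] for k in parsed if k.startswith(PREFIX)])
# -> ['palette', 'spacing'] on Python 3.7+, where dicts keep insertion order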
248,798 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.get_enabled_references | def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references | python | def get_enabled_references(self, datas, meta_references):
"""
Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
references = OrderedDict()
for section in meta_references:
references[section] = self.get_reference(datas, section)
return references | [
"def",
"get_enabled_references",
"(",
"self",
",",
"datas",
",",
"meta_references",
")",
":",
"references",
"=",
"OrderedDict",
"(",
")",
"for",
"section",
"in",
"meta_references",
":",
"references",
"[",
"section",
"]",
"=",
"self",
".",
"get_reference",
"(",
"datas",
",",
"section",
")",
"return",
"references"
] | Get enabled manifest references declarations.
Enabled references are defined through meta references declaration,
every other references are ignored.
Arguments:
datas (dict): Data where to search for reference declarations.
This is commonly the fully parsed manifest.
meta_references (list): List of enabled reference names.
Returns:
collections.OrderedDict: Serialized enabled references datas. | [
"Get",
"enabled",
"manifest",
"references",
"declarations",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L436-L456 |
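Since serialization walks only the enabled-names list, a manifest rule absent from that list is never visited at all. A small sketch with get_reference stubbed out for brevity:

from collections import OrderedDict

def get_reference_stub(parsed, name):
    return parsed['styleguide-reference-' + name]['value']

parsed = {
    'styleguide-reference-palette': {'value': '#ff0000'},
    'styleguide-reference-spacing': {'value': '1rem'},  # not enabled below
}
refs = OrderedDict(
    (section, get_reference_stub(parsed, section)) for section in ['palette']
)
print(refs)  # -> OrderedDict([('palette', '#ff0000')])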
248,799 | sveetch/py-css-styleguide | py_css_styleguide/serializer.py | ManifestSerializer.serialize | def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references']) | python | def serialize(self, datas):
"""
Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas.
"""
self._metas = OrderedDict({
'references': self.get_meta_references(datas),
})
return self.get_enabled_references(datas, self._metas['references']) | [
"def",
"serialize",
"(",
"self",
",",
"datas",
")",
":",
"self",
".",
"_metas",
"=",
"OrderedDict",
"(",
"{",
"'references'",
":",
"self",
".",
"get_meta_references",
"(",
"datas",
")",
",",
"}",
")",
"return",
"self",
".",
"get_enabled_references",
"(",
"datas",
",",
"self",
".",
"_metas",
"[",
"'references'",
"]",
")"
] | Serialize datas to manifest structure with metas and references.
Only references are returned, metas are assigned to attribute
``ManifestSerializer._metas``.
Arguments:
datas (dict): Data where to search for reference declarations. This
is commonly the fully parsed manifest.
Returns:
collections.OrderedDict: Serialized enabled references datas. | [
"Serialize",
"datas",
"to",
"manifest",
"structure",
"with",
"metas",
"and",
"references",
"."
] | 5acc693f71b2fa7d944d7fed561ae0a7699ccd0f | https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/serializer.py#L458-L476 |
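Putting the pieces together, an end-to-end usage sketch of serialize(), again assuming a no-argument constructor since it is not shown in this excerpt:

from py_css_styleguide.serializer import ManifestSerializer

serializer = ManifestSerializer()
references = serializer.serialize({
    'styleguide-metas-references': {'names': 'palette'},
    'styleguide-reference-palette': {'structure': 'string', 'value': '#ff0000'},
})
print(references)         # -> OrderedDict([('palette', '#ff0000')])
print(serializer._metas)  # -> OrderedDict([('references', ['palette'])])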