Dataset columns:

  repo               stringlengths   7 - 55
  path               stringlengths   4 - 223
  url                stringlengths   87 - 315
  code               stringlengths   75 - 104k
  code_tokens        list
  docstring          stringlengths   1 - 46.9k
  docstring_tokens   list
  language           stringclasses   1 value
  partition          stringclasses   3 values
  avg_line_len       float64         7.91 - 980
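The rows that follow all conform to this schema. As a minimal sketch, one record could be modeled in Python roughly as below; the CodeSearchRecord name and the choice of typing.TypedDict are illustrative assumptions, and only the field names and types come from the header above.

from typing import List, TypedDict


class CodeSearchRecord(TypedDict):
    """Illustrative model of one row; field names and types mirror the schema above."""
    repo: str                    # e.g. "numberoverzero/bloop"
    path: str                    # file path inside the repo
    url: str                     # GitHub permalink to the source lines
    code: str                    # full function source text
    code_tokens: List[str]       # tokenized form of `code`
    docstring: str               # the function's docstring
    docstring_tokens: List[str]  # tokenized form of `docstring`
    language: str                # a single value in this dump ("python")
    partition: str               # dataset split; "train" and "valid" appear below
    avg_line_len: float          # average source line length of the function
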
numberoverzero/bloop
bloop/conditions.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L887-L899
def printable_name(column, path=None): """Provided for debug output when rendering conditions. User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar """ pieces = [column.name] path = path or path_of(column) for segment in path: if isinstance(segment, str): pieces.append(segment) else: pieces[-1] += "[{}]".format(segment) return ".".join(pieces)
[ "def", "printable_name", "(", "column", ",", "path", "=", "None", ")", ":", "pieces", "=", "[", "column", ".", "name", "]", "path", "=", "path", "or", "path_of", "(", "column", ")", "for", "segment", "in", "path", ":", "if", "isinstance", "(", "segment", ",", "str", ")", ":", "pieces", ".", "append", "(", "segment", ")", "else", ":", "pieces", "[", "-", "1", "]", "+=", "\"[{}]\"", ".", "format", "(", "segment", ")", "return", "\".\"", ".", "join", "(", "pieces", ")" ]
Provided for debug output when rendering conditions. User.name[3]["foo"][0]["bar"] -> name[3].foo[0].bar
[ "Provided", "for", "debug", "output", "when", "rendering", "conditions", "." ]
python
train
30.846154
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2113-L2124
def train(self, s, path="spelling.txt"): """ Counts the words in the given string and saves the probabilities at the given path. This can be used to generate a new model for the Spelling() constructor. """ model = {} for w in re.findall("[a-z]+", s.lower()): model[w] = w in model and model[w] + 1 or 1 model = ("%s %s" % (k, v) for k, v in sorted(model.items())) model = "\n".join(model) f = open(path, "w") f.write(model) f.close()
[ "def", "train", "(", "self", ",", "s", ",", "path", "=", "\"spelling.txt\"", ")", ":", "model", "=", "{", "}", "for", "w", "in", "re", ".", "findall", "(", "\"[a-z]+\"", ",", "s", ".", "lower", "(", ")", ")", ":", "model", "[", "w", "]", "=", "w", "in", "model", "and", "model", "[", "w", "]", "+", "1", "or", "1", "model", "=", "(", "\"%s %s\"", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "sorted", "(", "model", ".", "items", "(", ")", ")", ")", "model", "=", "\"\\n\"", ".", "join", "(", "model", ")", "f", "=", "open", "(", "path", ",", "\"w\"", ")", "f", ".", "write", "(", "model", ")", "f", ".", "close", "(", ")" ]
Counts the words in the given string and saves the probabilities at the given path. This can be used to generate a new model for the Spelling() constructor.
[ "Counts", "the", "words", "in", "the", "given", "string", "and", "saves", "the", "probabilities", "at", "the", "given", "path", ".", "This", "can", "be", "used", "to", "generate", "a", "new", "model", "for", "the", "Spelling", "()", "constructor", "." ]
python
train
43.166667
wummel/linkchecker
third_party/dnspython/dns/message.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/message.py#L887-L938
def _rr_line(self, section): """Process one line from the text format answer, authority, or additional data sections. """ deleting = None # Name token = self.tok.get(want_leading = True) if not token.is_whitespace(): self.last_name = dns.name.from_text(token.value, None) name = self.last_name token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError # TTL try: ttl = int(token.value, 0) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: ttl = 0 # Class try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE: deleting = rdclass rdclass = self.zone_rdclass except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass = dns.rdataclass.IN # Type rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get() if not token.is_eol_or_eof(): self.tok.unget(token) rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None) covers = rd.covers() else: rd = None covers = dns.rdatatype.NONE rrset = self.message.find_rrset(section, name, rdclass, rdtype, covers, deleting, True, self.updating) if not rd is None: rrset.add(rd, ttl)
[ "def", "_rr_line", "(", "self", ",", "section", ")", ":", "deleting", "=", "None", "# Name", "token", "=", "self", ".", "tok", ".", "get", "(", "want_leading", "=", "True", ")", "if", "not", "token", ".", "is_whitespace", "(", ")", ":", "self", ".", "last_name", "=", "dns", ".", "name", ".", "from_text", "(", "token", ".", "value", ",", "None", ")", "name", "=", "self", ".", "last_name", "token", "=", "self", ".", "tok", ".", "get", "(", ")", "if", "not", "token", ".", "is_identifier", "(", ")", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "# TTL", "try", ":", "ttl", "=", "int", "(", "token", ".", "value", ",", "0", ")", "token", "=", "self", ".", "tok", ".", "get", "(", ")", "if", "not", "token", ".", "is_identifier", "(", ")", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "except", "dns", ".", "exception", ".", "SyntaxError", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "except", "Exception", ":", "ttl", "=", "0", "# Class", "try", ":", "rdclass", "=", "dns", ".", "rdataclass", ".", "from_text", "(", "token", ".", "value", ")", "token", "=", "self", ".", "tok", ".", "get", "(", ")", "if", "not", "token", ".", "is_identifier", "(", ")", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "if", "rdclass", "==", "dns", ".", "rdataclass", ".", "ANY", "or", "rdclass", "==", "dns", ".", "rdataclass", ".", "NONE", ":", "deleting", "=", "rdclass", "rdclass", "=", "self", ".", "zone_rdclass", "except", "dns", ".", "exception", ".", "SyntaxError", ":", "raise", "dns", ".", "exception", ".", "SyntaxError", "except", "Exception", ":", "rdclass", "=", "dns", ".", "rdataclass", ".", "IN", "# Type", "rdtype", "=", "dns", ".", "rdatatype", ".", "from_text", "(", "token", ".", "value", ")", "token", "=", "self", ".", "tok", ".", "get", "(", ")", "if", "not", "token", ".", "is_eol_or_eof", "(", ")", ":", "self", ".", "tok", ".", "unget", "(", "token", ")", "rd", "=", "dns", ".", "rdata", ".", "from_text", "(", "rdclass", ",", "rdtype", ",", "self", ".", "tok", ",", "None", ")", "covers", "=", "rd", ".", "covers", "(", ")", "else", ":", "rd", "=", "None", "covers", "=", "dns", ".", "rdatatype", ".", "NONE", "rrset", "=", "self", ".", "message", ".", "find_rrset", "(", "section", ",", "name", ",", "rdclass", ",", "rdtype", ",", "covers", ",", "deleting", ",", "True", ",", "self", ".", "updating", ")", "if", "not", "rd", "is", "None", ":", "rrset", ".", "add", "(", "rd", ",", "ttl", ")" ]
Process one line from the text format answer, authority, or additional data sections.
[ "Process", "one", "line", "from", "the", "text", "format", "answer", "authority", "or", "additional", "data", "sections", "." ]
python
train
36.134615
chemlab/chemlab
chemlab/core/spacegroup/spacegroup.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/core/spacegroup/spacegroup.py#L208-L219
def get_op(self): """Returns all symmetry operations (including inversions and subtranslations), but unlike get_symop(), they are returned as two ndarrays.""" if self.centrosymmetric: rot = np.tile(np.vstack((self.rotations, -self.rotations)), (self.nsubtrans, 1, 1)) trans = np.repeat(self.subtrans, 2*len(self.rotations), axis=0) else: rot = np.tile(self.rotations, (self.nsubtrans, 1, 1)) trans = np.repeat(self.subtrans, len(self.rotations), axis=0) return rot, trans
[ "def", "get_op", "(", "self", ")", ":", "if", "self", ".", "centrosymmetric", ":", "rot", "=", "np", ".", "tile", "(", "np", ".", "vstack", "(", "(", "self", ".", "rotations", ",", "-", "self", ".", "rotations", ")", ")", ",", "(", "self", ".", "nsubtrans", ",", "1", ",", "1", ")", ")", "trans", "=", "np", ".", "repeat", "(", "self", ".", "subtrans", ",", "2", "*", "len", "(", "self", ".", "rotations", ")", ",", "axis", "=", "0", ")", "else", ":", "rot", "=", "np", ".", "tile", "(", "self", ".", "rotations", ",", "(", "self", ".", "nsubtrans", ",", "1", ",", "1", ")", ")", "trans", "=", "np", ".", "repeat", "(", "self", ".", "subtrans", ",", "len", "(", "self", ".", "rotations", ")", ",", "axis", "=", "0", ")", "return", "rot", ",", "trans" ]
Returns all symmetry operations (including inversions and subtranslations), but unlike get_symop(), they are returned as two ndarrays.
[ "Returns", "all", "symmetry", "operations", "(", "including", "inversions", "and", "subtranslations", ")", "but", "unlike", "get_symop", "()", "they", "are", "returned", "as", "two", "ndarrays", "." ]
python
train
48.583333
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L4839-L4844
def readerWalker(self): """Create an xmltextReader for a preparsed document. """ ret = libxml2mod.xmlReaderWalker(self._o) if ret is None:raise treeError('xmlReaderWalker() failed') __tmp = xmlTextReader(_obj=ret) return __tmp
[ "def", "readerWalker", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlReaderWalker", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlReaderWalker() failed'", ")", "__tmp", "=", "xmlTextReader", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Create an xmltextReader for a preparsed document.
[ "Create", "an", "xmltextReader", "for", "a", "preparsed", "document", "." ]
python
train
43.5
Esri/ArcREST
src/arcrest/common/general.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/general.py#L379-L385
def mosaicMethod(self, value): """ get/set the mosaic method """ if value in self.__allowedMosaicMethods and \ self._mosaicMethod != value: self._mosaicMethod = value
[ "def", "mosaicMethod", "(", "self", ",", "value", ")", ":", "if", "value", "in", "self", ".", "__allowedMosaicMethods", "and", "self", ".", "_mosaicMethod", "!=", "value", ":", "self", ".", "_mosaicMethod", "=", "value" ]
get/set the mosaic method
[ "get", "/", "set", "the", "mosaic", "method" ]
python
train
30.714286
arista-eosplus/pyeapi
pyeapi/api/mlag.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/mlag.py#L137-L149
def _parse_peer_address(self, config): """Scans the config block and parses the peer-address value Args: config (str): The config block to scan Returns: dict: A dict object that is intended to be merged into the resource dict """ match = re.search(r'peer-address ([^\s]+)', config) value = match.group(1) if match else None return dict(peer_address=value)
[ "def", "_parse_peer_address", "(", "self", ",", "config", ")", ":", "match", "=", "re", ".", "search", "(", "r'peer-address ([^\\s]+)'", ",", "config", ")", "value", "=", "match", ".", "group", "(", "1", ")", "if", "match", "else", "None", "return", "dict", "(", "peer_address", "=", "value", ")" ]
Scans the config block and parses the peer-address value Args: config (str): The config block to scan Returns: dict: A dict object that is intended to be merged into the resource dict
[ "Scans", "the", "config", "block", "and", "parses", "the", "peer", "-", "address", "value" ]
python
train
33.923077
ruipgil/TrackToTrip
tracktotrip/similarity.py
https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/similarity.py#L71-L89
def intersection(L1, L2): """Intersects two line segments Args: L1 ([float, float]): x and y coordinates L2 ([float, float]): x and y coordinates Returns: bool: if they intersect (float, float): x and y of intersection, if they do """ D = L1[0] * L2[1] - L1[1] * L2[0] Dx = L1[2] * L2[1] - L1[1] * L2[2] Dy = L1[0] * L2[2] - L1[2] * L2[0] if D != 0: x = Dx / D y = Dy / D return x, y else: return False
[ "def", "intersection", "(", "L1", ",", "L2", ")", ":", "D", "=", "L1", "[", "0", "]", "*", "L2", "[", "1", "]", "-", "L1", "[", "1", "]", "*", "L2", "[", "0", "]", "Dx", "=", "L1", "[", "2", "]", "*", "L2", "[", "1", "]", "-", "L1", "[", "1", "]", "*", "L2", "[", "2", "]", "Dy", "=", "L1", "[", "0", "]", "*", "L2", "[", "2", "]", "-", "L1", "[", "2", "]", "*", "L2", "[", "0", "]", "if", "D", "!=", "0", ":", "x", "=", "Dx", "/", "D", "y", "=", "Dy", "/", "D", "return", "x", ",", "y", "else", ":", "return", "False" ]
Intersects two line segments Args: L1 ([float, float]): x and y coordinates L2 ([float, float]): x and y coordinates Returns: bool: if they intersect (float, float): x and y of intersection, if they do
[ "Intersects", "two", "line", "segments" ]
python
train
25.526316
titusjan/argos
argos/config/abstractcti.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/abstractcti.py#L417-L439
def getNonDefaultsDict(self): """ Recursively retrieves values as a dictionary to be used for persistence. Does not save defaultData and other properties, only stores values if they differ from the defaultData. If the CTI and none of its children differ from their default, a completely empty dictionary is returned. This is to achieve a smaller json representation. Typically descendants should override _nodeGetNonDefaultsDict instead of this function. """ dct = self._nodeGetNonDefaultsDict() childList = [] for childCti in self.childItems: childDct = childCti.getNonDefaultsDict() if childDct: childList.append(childDct) if childList: dct['childItems'] = childList if dct: dct['nodeName'] = self.nodeName return dct
[ "def", "getNonDefaultsDict", "(", "self", ")", ":", "dct", "=", "self", ".", "_nodeGetNonDefaultsDict", "(", ")", "childList", "=", "[", "]", "for", "childCti", "in", "self", ".", "childItems", ":", "childDct", "=", "childCti", ".", "getNonDefaultsDict", "(", ")", "if", "childDct", ":", "childList", ".", "append", "(", "childDct", ")", "if", "childList", ":", "dct", "[", "'childItems'", "]", "=", "childList", "if", "dct", ":", "dct", "[", "'nodeName'", "]", "=", "self", ".", "nodeName", "return", "dct" ]
Recursively retrieves values as a dictionary to be used for persistence. Does not save defaultData and other properties, only stores values if they differ from the defaultData. If the CTI and none of its children differ from their default, a completely empty dictionary is returned. This is to achieve a smaller json representation. Typically descendants should override _nodeGetNonDefaultsDict instead of this function.
[ "Recursively", "retrieves", "values", "as", "a", "dictionary", "to", "be", "used", "for", "persistence", ".", "Does", "not", "save", "defaultData", "and", "other", "properties", "only", "stores", "values", "if", "they", "differ", "from", "the", "defaultData", ".", "If", "the", "CTI", "and", "none", "of", "its", "children", "differ", "from", "their", "default", "a", "completely", "empty", "dictionary", "is", "returned", ".", "This", "is", "to", "achieve", "a", "smaller", "json", "representation", "." ]
python
train
38.652174
regardscitoyens/legipy
legipy/cli.py
https://github.com/regardscitoyens/legipy/blob/3553c5a56769f23d8922adfbfe44d7b9f4a5204c/legipy/cli.py#L18-L24
def json_serial(obj): """JSON serializer for objects not serializable by default json code""" if isinstance(obj, LegipyModel): return obj.to_json() elif isinstance(obj, (datetime.date, datetime.datetime)): return obj.isoformat() raise TypeError("Type {0} not serializable".format(repr(type(obj))))
[ "def", "json_serial", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "LegipyModel", ")", ":", "return", "obj", ".", "to_json", "(", ")", "elif", "isinstance", "(", "obj", ",", "(", "datetime", ".", "date", ",", "datetime", ".", "datetime", ")", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "raise", "TypeError", "(", "\"Type {0} not serializable\"", ".", "format", "(", "repr", "(", "type", "(", "obj", ")", ")", ")", ")" ]
JSON serializer for objects not serializable by default json code
[ "JSON", "serializer", "for", "objects", "not", "serializable", "by", "default", "json", "code" ]
python
train
46.142857
trendels/rhino
rhino/ext/session.py
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/ext/session.py#L43-L48
def add_message(self, text, type=None): """Add a message with an optional type.""" key = self._msg_key self.setdefault(key, []) self[key].append(message(type, text)) self.save()
[ "def", "add_message", "(", "self", ",", "text", ",", "type", "=", "None", ")", ":", "key", "=", "self", ".", "_msg_key", "self", ".", "setdefault", "(", "key", ",", "[", "]", ")", "self", "[", "key", "]", ".", "append", "(", "message", "(", "type", ",", "text", ")", ")", "self", ".", "save", "(", ")" ]
Add a message with an optional type.
[ "Add", "a", "message", "with", "an", "optional", "type", "." ]
python
train
35.333333
mottosso/be
be/lib.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/lib.py#L209-L223
def list_projects(root, backend=os.listdir): """List projects at `root` Arguments: root (str): Absolute path to the `be` root directory, typically the current working directory. """ projects = list() for project in sorted(backend(root)): abspath = os.path.join(root, project) if not isproject(abspath): continue projects.append(project) return projects
[ "def", "list_projects", "(", "root", ",", "backend", "=", "os", ".", "listdir", ")", ":", "projects", "=", "list", "(", ")", "for", "project", "in", "sorted", "(", "backend", "(", "root", ")", ")", ":", "abspath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "project", ")", "if", "not", "isproject", "(", "abspath", ")", ":", "continue", "projects", ".", "append", "(", "project", ")", "return", "projects" ]
List projects at `root` Arguments: root (str): Absolute path to the `be` root directory, typically the current working directory.
[ "List", "projects", "at", "root" ]
python
train
28
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/commands/restore_snapshot.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/commands/restore_snapshot.py#L44-L58
def _get_snapshot(vm, snapshot_name): """ Returns snapshot object by its name :param vm: :param snapshot_name: :type snapshot_name: str :return: Snapshot by its name :rtype vim.vm.Snapshot """ snapshots = SnapshotRetriever.get_vm_snapshots(vm) if snapshot_name not in snapshots: raise SnapshotNotFoundException('Snapshot {0} was not found'.format(snapshot_name)) return snapshots[snapshot_name]
[ "def", "_get_snapshot", "(", "vm", ",", "snapshot_name", ")", ":", "snapshots", "=", "SnapshotRetriever", ".", "get_vm_snapshots", "(", "vm", ")", "if", "snapshot_name", "not", "in", "snapshots", ":", "raise", "SnapshotNotFoundException", "(", "'Snapshot {0} was not found'", ".", "format", "(", "snapshot_name", ")", ")", "return", "snapshots", "[", "snapshot_name", "]" ]
Returns snapshot object by its name :param vm: :param snapshot_name: :type snapshot_name: str :return: Snapshot by its name :rtype vim.vm.Snapshot
[ "Returns", "snapshot", "object", "by", "its", "name", ":", "param", "vm", ":", ":", "param", "snapshot_name", ":", ":", "type", "snapshot_name", ":", "str", ":", "return", ":", "Snapshot", "by", "its", "name", ":", "rtype", "vim", ".", "vm", ".", "Snapshot" ]
python
train
32.133333
buildbot/buildbot
master/buildbot/worker/openstack.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/worker/openstack.py#L106-L120
def _constructClient(client_version, username, user_domain, password, project_name, project_domain, auth_url): """Return a novaclient from the given args.""" loader = loading.get_plugin_loader('password') # These only work with v3 if user_domain is not None or project_domain is not None: auth = loader.load_from_options(auth_url=auth_url, username=username, user_domain_name=user_domain, password=password, project_name=project_name, project_domain_name=project_domain) else: auth = loader.load_from_options(auth_url=auth_url, username=username, password=password, project_name=project_name) sess = session.Session(auth=auth) return client.Client(client_version, session=sess)
[ "def", "_constructClient", "(", "client_version", ",", "username", ",", "user_domain", ",", "password", ",", "project_name", ",", "project_domain", ",", "auth_url", ")", ":", "loader", "=", "loading", ".", "get_plugin_loader", "(", "'password'", ")", "# These only work with v3", "if", "user_domain", "is", "not", "None", "or", "project_domain", "is", "not", "None", ":", "auth", "=", "loader", ".", "load_from_options", "(", "auth_url", "=", "auth_url", ",", "username", "=", "username", ",", "user_domain_name", "=", "user_domain", ",", "password", "=", "password", ",", "project_name", "=", "project_name", ",", "project_domain_name", "=", "project_domain", ")", "else", ":", "auth", "=", "loader", ".", "load_from_options", "(", "auth_url", "=", "auth_url", ",", "username", "=", "username", ",", "password", "=", "password", ",", "project_name", "=", "project_name", ")", "sess", "=", "session", ".", "Session", "(", "auth", "=", "auth", ")", "return", "client", ".", "Client", "(", "client_version", ",", "session", "=", "sess", ")" ]
Return a novaclient from the given args.
[ "Return", "a", "novaclient", "from", "the", "given", "args", "." ]
python
train
57.2
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L350-L353
def get_transactions(self, account_id, **params): """https://developers.coinbase.com/api/v2#list-transactions""" response = self._get('v2', 'accounts', account_id, 'transactions', params=params) return self._make_api_object(response, Transaction)
[ "def", "get_transactions", "(", "self", ",", "account_id", ",", "*", "*", "params", ")", ":", "response", "=", "self", ".", "_get", "(", "'v2'", ",", "'accounts'", ",", "account_id", ",", "'transactions'", ",", "params", "=", "params", ")", "return", "self", ".", "_make_api_object", "(", "response", ",", "Transaction", ")" ]
https://developers.coinbase.com/api/v2#list-transactions
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#list", "-", "transactions" ]
python
train
66.75
davenquinn/Attitude
docs/scripts/generate-json.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/docs/scripts/generate-json.py#L11-L26
def serialize(pca, **kwargs): """ Serialize an orientation object to a dict suitable for JSON """ strike, dip, rake = pca.strike_dip_rake() hyp_axes = sampling_axes(pca) return dict( **kwargs, principal_axes = pca.axes.tolist(), hyperbolic_axes = hyp_axes.tolist(), n_samples = pca.n, strike=strike, dip=dip, rake=rake, angular_errors=[2*N.degrees(i) for i in angular_errors(hyp_axes)])
[ "def", "serialize", "(", "pca", ",", "*", "*", "kwargs", ")", ":", "strike", ",", "dip", ",", "rake", "=", "pca", ".", "strike_dip_rake", "(", ")", "hyp_axes", "=", "sampling_axes", "(", "pca", ")", "return", "dict", "(", "*", "*", "kwargs", ",", "principal_axes", "=", "pca", ".", "axes", ".", "tolist", "(", ")", ",", "hyperbolic_axes", "=", "hyp_axes", ".", "tolist", "(", ")", ",", "n_samples", "=", "pca", ".", "n", ",", "strike", "=", "strike", ",", "dip", "=", "dip", ",", "rake", "=", "rake", ",", "angular_errors", "=", "[", "2", "*", "N", ".", "degrees", "(", "i", ")", "for", "i", "in", "angular_errors", "(", "hyp_axes", ")", "]", ")" ]
Serialize an orientation object to a dict suitable for JSON
[ "Serialize", "an", "orientation", "object", "to", "a", "dict", "suitable", "for", "JSON" ]
python
train
28.75
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L799-L818
def data(self, index, role): """Get the information of the levels.""" if not index.isValid(): return None if role == Qt.FontRole: return self._font label = '' if index.column() == self.model.header_shape[1] - 1: label = str(self.model.name(0, index.row())) elif index.row() == self.model.header_shape[0] - 1: label = str(self.model.name(1, index.column())) if role == Qt.DisplayRole and label: return label elif role == Qt.ForegroundRole: return self._foreground elif role == Qt.BackgroundRole: return self._background elif role == Qt.BackgroundRole: return self._palette.window() return None
[ "def", "data", "(", "self", ",", "index", ",", "role", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "None", "if", "role", "==", "Qt", ".", "FontRole", ":", "return", "self", ".", "_font", "label", "=", "''", "if", "index", ".", "column", "(", ")", "==", "self", ".", "model", ".", "header_shape", "[", "1", "]", "-", "1", ":", "label", "=", "str", "(", "self", ".", "model", ".", "name", "(", "0", ",", "index", ".", "row", "(", ")", ")", ")", "elif", "index", ".", "row", "(", ")", "==", "self", ".", "model", ".", "header_shape", "[", "0", "]", "-", "1", ":", "label", "=", "str", "(", "self", ".", "model", ".", "name", "(", "1", ",", "index", ".", "column", "(", ")", ")", ")", "if", "role", "==", "Qt", ".", "DisplayRole", "and", "label", ":", "return", "label", "elif", "role", "==", "Qt", ".", "ForegroundRole", ":", "return", "self", ".", "_foreground", "elif", "role", "==", "Qt", ".", "BackgroundRole", ":", "return", "self", ".", "_background", "elif", "role", "==", "Qt", ".", "BackgroundRole", ":", "return", "self", ".", "_palette", ".", "window", "(", ")", "return", "None" ]
Get the information of the levels.
[ "Get", "the", "information", "of", "the", "levels", "." ]
python
train
38.8
ryan-roemer/django-cloud-browser
cloud_browser/common.py
https://github.com/ryan-roemer/django-cloud-browser/blob/b06cdd24885a6309e843ed924dbf1705b67e7f48/cloud_browser/common.py#L177-L179
def path_join(*args): """Join path parts to single path.""" return SEP.join((x for x in args if x not in (None, ''))).strip(SEP)
[ "def", "path_join", "(", "*", "args", ")", ":", "return", "SEP", ".", "join", "(", "(", "x", "for", "x", "in", "args", "if", "x", "not", "in", "(", "None", ",", "''", ")", ")", ")", ".", "strip", "(", "SEP", ")" ]
Join path parts to single path.
[ "Join", "path", "parts", "to", "single", "path", "." ]
python
train
44.666667
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/zeromq/driver.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/zeromq/driver.py#L64-L73
def on_start(self): """ start publisher """ LOGGER.debug("zeromq.Publisher.on_start") try: self.zmqsocket.bind(self.zmqbind_url) except Exception as e: LOGGER.error("zeromq.Publisher.on_start - error while binding publisher ! " + e.__cause__) raise e
[ "def", "on_start", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"zeromq.Publisher.on_start\"", ")", "try", ":", "self", ".", "zmqsocket", ".", "bind", "(", "self", ".", "zmqbind_url", ")", "except", "Exception", "as", "e", ":", "LOGGER", ".", "error", "(", "\"zeromq.Publisher.on_start - error while binding publisher ! \"", "+", "e", ".", "__cause__", ")", "raise", "e" ]
start publisher
[ "start", "publisher" ]
python
train
32.5
twidi/py-dataql
dataql/solvers/filters.py
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/filters.py#L209-L238
def solve(self, value, filter_): """Get slice or entry defined by an index from the given value. Arguments --------- value : ? A value to solve in combination with the given filter. filter_ : dataql.resource.SliceFilter An instance of ``SliceFilter``to solve with the given value. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> solver = SliceSolver(registry) >>> solver.solve([1, 2, 3], SliceFilter(1)) 2 >>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None))) [2, 3] >>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2))) [1] >>> solver.solve([1, 2, 3], SliceFilter(4)) """ try: return value[filter_.slice or filter_.index] except IndexError: return None
[ "def", "solve", "(", "self", ",", "value", ",", "filter_", ")", ":", "try", ":", "return", "value", "[", "filter_", ".", "slice", "or", "filter_", ".", "index", "]", "except", "IndexError", ":", "return", "None" ]
Get slice or entry defined by an index from the given value. Arguments --------- value : ? A value to solve in combination with the given filter. filter_ : dataql.resource.SliceFilter An instance of ``SliceFilter``to solve with the given value. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> solver = SliceSolver(registry) >>> solver.solve([1, 2, 3], SliceFilter(1)) 2 >>> solver.solve([1, 2, 3], SliceFilter(slice(1, None, None))) [2, 3] >>> solver.solve([1, 2, 3], SliceFilter(slice(0, 2, 2))) [1] >>> solver.solve([1, 2, 3], SliceFilter(4))
[ "Get", "slice", "or", "entry", "defined", "by", "an", "index", "from", "the", "given", "value", "." ]
python
train
29.866667
hobson/aima
aima/search.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L101-L107
def path(self): "Return a list of nodes forming the path from the root to this node." node, path_back = self, [] while node: path_back.append(node) node = node.parent return list(reversed(path_back))
[ "def", "path", "(", "self", ")", ":", "node", ",", "path_back", "=", "self", ",", "[", "]", "while", "node", ":", "path_back", ".", "append", "(", "node", ")", "node", "=", "node", ".", "parent", "return", "list", "(", "reversed", "(", "path_back", ")", ")" ]
Return a list of nodes forming the path from the root to this node.
[ "Return", "a", "list", "of", "nodes", "forming", "the", "path", "from", "the", "root", "to", "this", "node", "." ]
python
valid
35.571429
jazzband/django-queued-storage
queued_storage/backends.py
https://github.com/jazzband/django-queued-storage/blob/f8225d88a01ef5ca8001aeb3f7f80818a022a12d/queued_storage/backends.py#L174-L204
def save(self, name, content, max_length=None): """ Saves the given content with the given name using the local storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed` attribute is ``True`` this will automatically call the :meth:`~queued_storage.backends.QueuedStorage.transfer` method queuing the transfer from local to remote storage. :param name: file name :type name: str :param content: content of the file specified by name :type content: :class:`~django:django.core.files.File` :rtype: str """ cache_key = self.get_cache_key(name) cache.set(cache_key, False) # Use a name that is available on both the local and remote storage # systems and save locally. name = self.get_available_name(name) try: name = self.local.save(name, content, max_length=max_length) except TypeError: # Django < 1.10 name = self.local.save(name, content) # Pass on the cache key to prevent duplicate cache key creation, # we save the result in the storage to be able to test for it if not self.delayed: self.result = self.transfer(name, cache_key=cache_key) return name
[ "def", "save", "(", "self", ",", "name", ",", "content", ",", "max_length", "=", "None", ")", ":", "cache_key", "=", "self", ".", "get_cache_key", "(", "name", ")", "cache", ".", "set", "(", "cache_key", ",", "False", ")", "# Use a name that is available on both the local and remote storage", "# systems and save locally.", "name", "=", "self", ".", "get_available_name", "(", "name", ")", "try", ":", "name", "=", "self", ".", "local", ".", "save", "(", "name", ",", "content", ",", "max_length", "=", "max_length", ")", "except", "TypeError", ":", "# Django < 1.10", "name", "=", "self", ".", "local", ".", "save", "(", "name", ",", "content", ")", "# Pass on the cache key to prevent duplicate cache key creation,", "# we save the result in the storage to be able to test for it", "if", "not", "self", ".", "delayed", ":", "self", ".", "result", "=", "self", ".", "transfer", "(", "name", ",", "cache_key", "=", "cache_key", ")", "return", "name" ]
Saves the given content with the given name using the local storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed` attribute is ``True`` this will automatically call the :meth:`~queued_storage.backends.QueuedStorage.transfer` method queuing the transfer from local to remote storage. :param name: file name :type name: str :param content: content of the file specified by name :type content: :class:`~django:django.core.files.File` :rtype: str
[ "Saves", "the", "given", "content", "with", "the", "given", "name", "using", "the", "local", "storage", ".", "If", "the", ":", "attr", ":", "~queued_storage", ".", "backends", ".", "QueuedStorage", ".", "delayed", "attribute", "is", "True", "this", "will", "automatically", "call", "the", ":", "meth", ":", "~queued_storage", ".", "backends", ".", "QueuedStorage", ".", "transfer", "method", "queuing", "the", "transfer", "from", "local", "to", "remote", "storage", "." ]
python
train
41
ga4gh/ga4gh-server
ga4gh/server/datamodel/reads.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/reads.py#L496-L520
def toProtocolElement(self): """ Returns the GA4GH protocol representation of this ReadGroup. """ # TODO this is very incomplete, but we don't have the # implementation to fill out the rest of the fields currently readGroup = protocol.ReadGroup() readGroup.id = self.getId() readGroup.created = self._creationTime readGroup.updated = self._updateTime dataset = self.getParentContainer().getParentContainer() readGroup.dataset_id = dataset.getId() readGroup.name = self.getLocalId() readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize()) referenceSet = self._parentContainer.getReferenceSet() readGroup.sample_name = pb.string(self.getSampleName()) readGroup.biosample_id = pb.string(self.getBiosampleId()) if referenceSet is not None: readGroup.reference_set_id = referenceSet.getId() readGroup.stats.CopyFrom(self.getStats()) readGroup.programs.extend(self.getPrograms()) readGroup.description = pb.string(self.getDescription()) readGroup.experiment.CopyFrom(self.getExperiment()) self.serializeAttributes(readGroup) return readGroup
[ "def", "toProtocolElement", "(", "self", ")", ":", "# TODO this is very incomplete, but we don't have the", "# implementation to fill out the rest of the fields currently", "readGroup", "=", "protocol", ".", "ReadGroup", "(", ")", "readGroup", ".", "id", "=", "self", ".", "getId", "(", ")", "readGroup", ".", "created", "=", "self", ".", "_creationTime", "readGroup", ".", "updated", "=", "self", ".", "_updateTime", "dataset", "=", "self", ".", "getParentContainer", "(", ")", ".", "getParentContainer", "(", ")", "readGroup", ".", "dataset_id", "=", "dataset", ".", "getId", "(", ")", "readGroup", ".", "name", "=", "self", ".", "getLocalId", "(", ")", "readGroup", ".", "predicted_insert_size", "=", "pb", ".", "int", "(", "self", ".", "getPredictedInsertSize", "(", ")", ")", "referenceSet", "=", "self", ".", "_parentContainer", ".", "getReferenceSet", "(", ")", "readGroup", ".", "sample_name", "=", "pb", ".", "string", "(", "self", ".", "getSampleName", "(", ")", ")", "readGroup", ".", "biosample_id", "=", "pb", ".", "string", "(", "self", ".", "getBiosampleId", "(", ")", ")", "if", "referenceSet", "is", "not", "None", ":", "readGroup", ".", "reference_set_id", "=", "referenceSet", ".", "getId", "(", ")", "readGroup", ".", "stats", ".", "CopyFrom", "(", "self", ".", "getStats", "(", ")", ")", "readGroup", ".", "programs", ".", "extend", "(", "self", ".", "getPrograms", "(", ")", ")", "readGroup", ".", "description", "=", "pb", ".", "string", "(", "self", ".", "getDescription", "(", ")", ")", "readGroup", ".", "experiment", ".", "CopyFrom", "(", "self", ".", "getExperiment", "(", ")", ")", "self", ".", "serializeAttributes", "(", "readGroup", ")", "return", "readGroup" ]
Returns the GA4GH protocol representation of this ReadGroup.
[ "Returns", "the", "GA4GH", "protocol", "representation", "of", "this", "ReadGroup", "." ]
python
train
48.92
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L2176-L2193
def get_assessment_offered(self): """Gets the ``AssessmentOffered``. return: (osid.assessment.AssessmentOffered) - the assessment offered raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.Activity.get_objective if not bool(self._my_map['assessmentOfferedId']): raise errors.IllegalState('assessment_offered empty') mgr = self._get_provider_manager('ASSESSMENT') if not mgr.supports_assessment_offered_lookup(): raise errors.OperationFailed('Assessment does not support AssessmentOffered lookup') lookup_session = mgr.get_assessment_offered_lookup_session(proxy=getattr(self, "_proxy", None)) lookup_session.use_federated_bank_view() return lookup_session.get_assessment_offered(self.get_assessment_offered_id())
[ "def", "get_assessment_offered", "(", "self", ")", ":", "# Implemented from template for osid.learning.Activity.get_objective", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'assessmentOfferedId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'assessment_offered empty'", ")", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ")", "if", "not", "mgr", ".", "supports_assessment_offered_lookup", "(", ")", ":", "raise", "errors", ".", "OperationFailed", "(", "'Assessment does not support AssessmentOffered lookup'", ")", "lookup_session", "=", "mgr", ".", "get_assessment_offered_lookup_session", "(", "proxy", "=", "getattr", "(", "self", ",", "\"_proxy\"", ",", "None", ")", ")", "lookup_session", ".", "use_federated_bank_view", "(", ")", "return", "lookup_session", ".", "get_assessment_offered", "(", "self", ".", "get_assessment_offered_id", "(", ")", ")" ]
Gets the ``AssessmentOffered``. return: (osid.assessment.AssessmentOffered) - the assessment offered raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "AssessmentOffered", "." ]
python
train
52.5
phalt/beckett
beckett/clients.py
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/clients.py#L142-L157
def _call_api_single_related_resource(self, resource, full_resource_url, method_name, **kwargs): """ For HypermediaResource - make an API call to a known URL """ url = full_resource_url params = { 'headers': self.get_http_headers( resource.Meta.name, method_name, **kwargs), 'url': url } prepared_request = self.prepare_http_request( 'GET', params, **kwargs) response = self.session.send(prepared_request) return self._handle_response( response, resource.Meta.valid_status_codes, resource)
[ "def", "_call_api_single_related_resource", "(", "self", ",", "resource", ",", "full_resource_url", ",", "method_name", ",", "*", "*", "kwargs", ")", ":", "url", "=", "full_resource_url", "params", "=", "{", "'headers'", ":", "self", ".", "get_http_headers", "(", "resource", ".", "Meta", ".", "name", ",", "method_name", ",", "*", "*", "kwargs", ")", ",", "'url'", ":", "url", "}", "prepared_request", "=", "self", ".", "prepare_http_request", "(", "'GET'", ",", "params", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "session", ".", "send", "(", "prepared_request", ")", "return", "self", ".", "_handle_response", "(", "response", ",", "resource", ".", "Meta", ".", "valid_status_codes", ",", "resource", ")" ]
For HypermediaResource - make an API call to a known URL
[ "For", "HypermediaResource", "-", "make", "an", "API", "call", "to", "a", "known", "URL" ]
python
train
40.75
sonyxperiadev/pygerrit
pygerrit/client.py
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/client.py#L131-L135
def start_event_stream(self): """ Start streaming events from `gerrit stream-events`. """ if not self._stream: self._stream = GerritStream(self, ssh_client=self._ssh_client) self._stream.start()
[ "def", "start_event_stream", "(", "self", ")", ":", "if", "not", "self", ".", "_stream", ":", "self", ".", "_stream", "=", "GerritStream", "(", "self", ",", "ssh_client", "=", "self", ".", "_ssh_client", ")", "self", ".", "_stream", ".", "start", "(", ")" ]
Start streaming events from `gerrit stream-events`.
[ "Start", "streaming", "events", "from", "gerrit", "stream", "-", "events", "." ]
python
train
46
dpkp/kafka-python
kafka/consumer/group.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/consumer/group.py#L922-L966
def offsets_for_times(self, timestamps): """Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. This is a blocking call. The consumer does not have to be assigned the partitions. If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, ``None`` will be returned for that partition. ``None`` will also be returned for the partition if there are no messages in it. Note: This method may block indefinitely if the partition does not exist. Arguments: timestamps (dict): ``{TopicPartition: int}`` mapping from partition to the timestamp to look up. Unit should be milliseconds since beginning of the epoch (midnight Jan 1, 1970 (UTC)) Returns: ``{TopicPartition: OffsetAndTimestamp}``: mapping from partition to the timestamp and offset of the first message with timestamp greater than or equal to the target timestamp. Raises: ValueError: If the target timestamp is negative UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms """ if self.config['api_version'] <= (0, 10, 0): raise UnsupportedVersionError( "offsets_for_times API not supported for cluster version {}" .format(self.config['api_version'])) for tp, ts in six.iteritems(timestamps): timestamps[tp] = int(ts) if ts < 0: raise ValueError( "The target time for partition {} is {}. The target time " "cannot be negative.".format(tp, ts)) return self._fetcher.get_offsets_by_times( timestamps, self.config['request_timeout_ms'])
[ "def", "offsets_for_times", "(", "self", ",", "timestamps", ")", ":", "if", "self", ".", "config", "[", "'api_version'", "]", "<=", "(", "0", ",", "10", ",", "0", ")", ":", "raise", "UnsupportedVersionError", "(", "\"offsets_for_times API not supported for cluster version {}\"", ".", "format", "(", "self", ".", "config", "[", "'api_version'", "]", ")", ")", "for", "tp", ",", "ts", "in", "six", ".", "iteritems", "(", "timestamps", ")", ":", "timestamps", "[", "tp", "]", "=", "int", "(", "ts", ")", "if", "ts", "<", "0", ":", "raise", "ValueError", "(", "\"The target time for partition {} is {}. The target time \"", "\"cannot be negative.\"", ".", "format", "(", "tp", ",", "ts", ")", ")", "return", "self", ".", "_fetcher", ".", "get_offsets_by_times", "(", "timestamps", ",", "self", ".", "config", "[", "'request_timeout_ms'", "]", ")" ]
Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. This is a blocking call. The consumer does not have to be assigned the partitions. If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, ``None`` will be returned for that partition. ``None`` will also be returned for the partition if there are no messages in it. Note: This method may block indefinitely if the partition does not exist. Arguments: timestamps (dict): ``{TopicPartition: int}`` mapping from partition to the timestamp to look up. Unit should be milliseconds since beginning of the epoch (midnight Jan 1, 1970 (UTC)) Returns: ``{TopicPartition: OffsetAndTimestamp}``: mapping from partition to the timestamp and offset of the first message with timestamp greater than or equal to the target timestamp. Raises: ValueError: If the target timestamp is negative UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms
[ "Look", "up", "the", "offsets", "for", "the", "given", "partitions", "by", "timestamp", ".", "The", "returned", "offset", "for", "each", "partition", "is", "the", "earliest", "offset", "whose", "timestamp", "is", "greater", "than", "or", "equal", "to", "the", "given", "timestamp", "in", "the", "corresponding", "partition", "." ]
python
train
46.222222
72squared/redpipe
redpipe/fields.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/fields.py#L242-L254
def decode(cls, value): """ take a utf-8 encoded byte-string from redis and turn it back into a list :param value: bytes :return: list """ try: return None if value is None else \ list(json.loads(value.decode(cls._encoding))) except (TypeError, AttributeError): return list(value)
[ "def", "decode", "(", "cls", ",", "value", ")", ":", "try", ":", "return", "None", "if", "value", "is", "None", "else", "list", "(", "json", ".", "loads", "(", "value", ".", "decode", "(", "cls", ".", "_encoding", ")", ")", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "return", "list", "(", "value", ")" ]
take a utf-8 encoded byte-string from redis and turn it back into a list :param value: bytes :return: list
[ "take", "a", "utf", "-", "8", "encoded", "byte", "-", "string", "from", "redis", "and", "turn", "it", "back", "into", "a", "list" ]
python
train
28.692308
swisscom/cleanerversion
versions/fields.py
https://github.com/swisscom/cleanerversion/blob/becadbab5d7b474a0e9a596b99e97682402d2f2c/versions/fields.py#L46-L63
def get_extra_restriction(self, where_class, alias, remote_alias): """ Overrides ForeignObject's get_extra_restriction function that returns an SQL statement which is appended to a JOIN's conditional filtering part :return: SQL conditional statement :rtype: WhereNode """ historic_sql = '''{alias}.version_start_date <= %s AND ({alias}.version_end_date > %s OR {alias}.version_end_date is NULL )''' current_sql = '''{alias}.version_end_date is NULL''' # How 'bout creating an ExtraWhere here, without params return where_class([VersionedExtraWhere(historic_sql=historic_sql, current_sql=current_sql, alias=alias, remote_alias=remote_alias)])
[ "def", "get_extra_restriction", "(", "self", ",", "where_class", ",", "alias", ",", "remote_alias", ")", ":", "historic_sql", "=", "'''{alias}.version_start_date <= %s\n AND ({alias}.version_end_date > %s\n OR {alias}.version_end_date is NULL )'''", "current_sql", "=", "'''{alias}.version_end_date is NULL'''", "# How 'bout creating an ExtraWhere here, without params", "return", "where_class", "(", "[", "VersionedExtraWhere", "(", "historic_sql", "=", "historic_sql", ",", "current_sql", "=", "current_sql", ",", "alias", "=", "alias", ",", "remote_alias", "=", "remote_alias", ")", "]", ")" ]
Overrides ForeignObject's get_extra_restriction function that returns an SQL statement which is appended to a JOIN's conditional filtering part :return: SQL conditional statement :rtype: WhereNode
[ "Overrides", "ForeignObject", "s", "get_extra_restriction", "function", "that", "returns", "an", "SQL", "statement", "which", "is", "appended", "to", "a", "JOIN", "s", "conditional", "filtering", "part" ]
python
train
50.944444
DataONEorg/d1_python
lib_client/src/d1_client/cnclient.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/cnclient.py#L912-L935
def updateReplicationMetadataResponse( self, pid, replicaMetadata, serialVersion, vendorSpecific=None ): """CNReplication.updateReplicationMetadata(session, pid, replicaMetadata, serialVersion) → boolean https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_AP Is.html#CNReplication.updateReplicationMetadata Not implemented. Args: pid: replicaMetadata: serialVersion: vendorSpecific: Returns: """ mmp_dict = { 'replicaMetadata': ('replicaMetadata.xml', replicaMetadata.toxml('utf-8')), 'serialVersion': str(serialVersion), } return self.PUT( ['replicaMetadata', pid], fields=mmp_dict, headers=vendorSpecific )
[ "def", "updateReplicationMetadataResponse", "(", "self", ",", "pid", ",", "replicaMetadata", ",", "serialVersion", ",", "vendorSpecific", "=", "None", ")", ":", "mmp_dict", "=", "{", "'replicaMetadata'", ":", "(", "'replicaMetadata.xml'", ",", "replicaMetadata", ".", "toxml", "(", "'utf-8'", ")", ")", ",", "'serialVersion'", ":", "str", "(", "serialVersion", ")", ",", "}", "return", "self", ".", "PUT", "(", "[", "'replicaMetadata'", ",", "pid", "]", ",", "fields", "=", "mmp_dict", ",", "headers", "=", "vendorSpecific", ")" ]
CNReplication.updateReplicationMetadata(session, pid, replicaMetadata, serialVersion) → boolean https://releases.dataone.org/online/api- documentation-v2.0.1/apis/CN_AP Is.html#CNReplication.updateReplicationMetadata Not implemented. Args: pid: replicaMetadata: serialVersion: vendorSpecific: Returns:
[ "CNReplication", ".", "updateReplicationMetadata", "(", "session", "pid", "replicaMetadata", "serialVersion", ")", "→", "boolean", "https", ":", "//", "releases", ".", "dataone", ".", "org", "/", "online", "/", "api", "-", "documentation", "-", "v2", ".", "0", ".", "1", "/", "apis", "/", "CN_AP", "Is", ".", "html#CNReplication", ".", "updateReplicationMetadata", "Not", "implemented", "." ]
python
train
32.583333
tensorflow/mesh
mesh_tensorflow/transformer/transformer.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L959-L1005
def make_bitransformer( input_vocab_size=gin.REQUIRED, output_vocab_size=gin.REQUIRED, layout=None, mesh_shape=None): """Gin-configurable bitransformer constructor. In your config file you need to set the encoder and decoder layers like this: encoder/make_layer_stack.layers = [ @transformer_layers.SelfAttention, @transformer_layers.DenseReluDense, ] decoder/make_layer_stack.layers = [ @transformer_layers.SelfAttention, @transformer_layers.EncDecAttention, @transformer_layers.DenseReluDense, ] Args: input_vocab_size: a integer output_vocab_size: an integer layout: optional - an input to mtf.convert_to_layout_rules Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape mesh_shape: optional - an input to mtf.convert_to_shape Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape Returns: a Bitransformer """ with gin.config_scope("encoder"): encoder = Unitransformer( layer_stack=make_layer_stack(), input_vocab_size=input_vocab_size, output_vocab_size=None, autoregressive=False, name="encoder", layout=layout, mesh_shape=mesh_shape) with gin.config_scope("decoder"): decoder = Unitransformer( layer_stack=make_layer_stack(), input_vocab_size=output_vocab_size, output_vocab_size=output_vocab_size, autoregressive=True, name="decoder", layout=layout, mesh_shape=mesh_shape) return Bitransformer(encoder, decoder)
[ "def", "make_bitransformer", "(", "input_vocab_size", "=", "gin", ".", "REQUIRED", ",", "output_vocab_size", "=", "gin", ".", "REQUIRED", ",", "layout", "=", "None", ",", "mesh_shape", "=", "None", ")", ":", "with", "gin", ".", "config_scope", "(", "\"encoder\"", ")", ":", "encoder", "=", "Unitransformer", "(", "layer_stack", "=", "make_layer_stack", "(", ")", ",", "input_vocab_size", "=", "input_vocab_size", ",", "output_vocab_size", "=", "None", ",", "autoregressive", "=", "False", ",", "name", "=", "\"encoder\"", ",", "layout", "=", "layout", ",", "mesh_shape", "=", "mesh_shape", ")", "with", "gin", ".", "config_scope", "(", "\"decoder\"", ")", ":", "decoder", "=", "Unitransformer", "(", "layer_stack", "=", "make_layer_stack", "(", ")", ",", "input_vocab_size", "=", "output_vocab_size", ",", "output_vocab_size", "=", "output_vocab_size", ",", "autoregressive", "=", "True", ",", "name", "=", "\"decoder\"", ",", "layout", "=", "layout", ",", "mesh_shape", "=", "mesh_shape", ")", "return", "Bitransformer", "(", "encoder", ",", "decoder", ")" ]
Gin-configurable bitransformer constructor. In your config file you need to set the encoder and decoder layers like this: encoder/make_layer_stack.layers = [ @transformer_layers.SelfAttention, @transformer_layers.DenseReluDense, ] decoder/make_layer_stack.layers = [ @transformer_layers.SelfAttention, @transformer_layers.EncDecAttention, @transformer_layers.DenseReluDense, ] Args: input_vocab_size: a integer output_vocab_size: an integer layout: optional - an input to mtf.convert_to_layout_rules Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape mesh_shape: optional - an input to mtf.convert_to_shape Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape Returns: a Bitransformer
[ "Gin", "-", "configurable", "bitransformer", "constructor", "." ]
python
train
32.404255
angr/angr
angr/analyses/cfg/cfg_base.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L1322-L1473
def _process_irrational_functions(self, functions, predetermined_function_addrs, blockaddr_to_function): """ For unresolveable indirect jumps, angr marks those jump targets as individual functions. For example, usually the following pattern is seen: sub_0x400010: push ebp mov esp, ebp ... cmp eax, 10 ja end mov eax, jumptable[eax] jmp eax sub_0x400080: # do something here jmp end end (0x400e00): pop ebp ret In the example above, `process_irrational_functions` will remove function 0x400080, and merge it with function 0x400010. :param angr.knowledge_plugins.FunctionManager functions: all functions that angr recovers, including those ones that are misidentified as functions. :param dict blockaddr_to_function: A mapping between block addresses and Function instances. :return: A set of addresses of all removed functions :rtype: set """ functions_to_remove = { } functions_can_be_removed = set(functions.keys()) - set(predetermined_function_addrs) for func_addr, function in functions.items(): if func_addr in functions_to_remove: continue # check all blocks and see if any block ends with an indirect jump and is not resolved has_unresolved_jumps = False # the functions to merge with must be locating between the unresolved basic block address and the endpoint # of the current function max_unresolved_jump_addr = 0 for block_addr in function.block_addrs_set: if block_addr in self.indirect_jumps and \ self.indirect_jumps[block_addr].jumpkind == 'Ijk_Boring' and \ not self.indirect_jumps[block_addr].resolved_targets: # it's not resolved # we should also make sure it's a jump, not a call has_unresolved_jumps = True max_unresolved_jump_addr = max(max_unresolved_jump_addr, block_addr) if not has_unresolved_jumps: continue if function.startpoint is None: continue startpoint_addr = function.startpoint.addr if not function.endpoints: # Function should have at least one endpoint continue endpoint_addr = max([ a.addr for a in function.endpoints ]) the_endpoint = next(a for a in function.endpoints if a.addr == endpoint_addr) endpoint_addr += the_endpoint.size # sanity check: startpoint of the function should be greater than its endpoint if startpoint_addr >= endpoint_addr: continue if max_unresolved_jump_addr <= startpoint_addr or max_unresolved_jump_addr >= endpoint_addr: continue # scan forward from the endpoint to include any function tail jumps # Here is an example: # loc_8049562: # mov eax, ebp # add esp, 3ch # ... # ret # loc_804956c: # mov ebp, 3 # jmp loc_8049562 # loc_8049573: # mov ebp, 4 # jmp loc_8049562 # last_addr = endpoint_addr tmp_state = self.project.factory.blank_state(mode='fastpath') while True: try: # using successors is slow, but acceptable since we won't be creating millions of blocks here... 
tmp_state.ip = last_addr b = self.project.factory.successors(tmp_state, jumpkind='Ijk_Boring') if len(b.successors) != 1: break if b.successors[0].history.jumpkind not in ('Ijk_Boring', 'Ijk_InvalICache'): break if b.successors[0].ip.symbolic: break suc_addr = b.successors[0].ip._model_concrete if max(startpoint_addr, the_endpoint.addr - 0x40) <= suc_addr < the_endpoint.addr + the_endpoint.size: # increment the endpoint_addr endpoint_addr = b.addr + b.artifacts['irsb_size'] else: break last_addr = b.addr + b.artifacts['irsb_size'] except (SimTranslationError, SimMemoryError, SimIRSBError, SimEngineError): break # find all functions that are between [ startpoint, endpoint ] should_merge = True functions_to_merge = set() for f_addr in functions_can_be_removed: f = functions[f_addr] if f_addr == func_addr: continue if max_unresolved_jump_addr < f_addr < endpoint_addr and \ all([max_unresolved_jump_addr < b_addr < endpoint_addr for b_addr in f.block_addrs]): if f_addr in functions_to_remove: # this function has already been merged with other functions before... it cannot be merged with # this function anymore should_merge = False break if f_addr in predetermined_function_addrs: # this function is a legit one. it shouldn't be removed/merged should_merge = False break functions_to_merge.add(f_addr) if not should_merge: # we shouldn't merge... continue for f_addr in functions_to_merge: functions_to_remove[f_addr] = func_addr # merge all functions for to_remove, merge_with in functions_to_remove.items(): func_merge_with = self._addr_to_function(merge_with, blockaddr_to_function, functions) for block_addr in functions[to_remove].block_addrs: blockaddr_to_function[block_addr] = func_merge_with del functions[to_remove] return set(functions_to_remove.keys())
[ "def", "_process_irrational_functions", "(", "self", ",", "functions", ",", "predetermined_function_addrs", ",", "blockaddr_to_function", ")", ":", "functions_to_remove", "=", "{", "}", "functions_can_be_removed", "=", "set", "(", "functions", ".", "keys", "(", ")", ")", "-", "set", "(", "predetermined_function_addrs", ")", "for", "func_addr", ",", "function", "in", "functions", ".", "items", "(", ")", ":", "if", "func_addr", "in", "functions_to_remove", ":", "continue", "# check all blocks and see if any block ends with an indirect jump and is not resolved", "has_unresolved_jumps", "=", "False", "# the functions to merge with must be locating between the unresolved basic block address and the endpoint", "# of the current function", "max_unresolved_jump_addr", "=", "0", "for", "block_addr", "in", "function", ".", "block_addrs_set", ":", "if", "block_addr", "in", "self", ".", "indirect_jumps", "and", "self", ".", "indirect_jumps", "[", "block_addr", "]", ".", "jumpkind", "==", "'Ijk_Boring'", "and", "not", "self", ".", "indirect_jumps", "[", "block_addr", "]", ".", "resolved_targets", ":", "# it's not resolved", "# we should also make sure it's a jump, not a call", "has_unresolved_jumps", "=", "True", "max_unresolved_jump_addr", "=", "max", "(", "max_unresolved_jump_addr", ",", "block_addr", ")", "if", "not", "has_unresolved_jumps", ":", "continue", "if", "function", ".", "startpoint", "is", "None", ":", "continue", "startpoint_addr", "=", "function", ".", "startpoint", ".", "addr", "if", "not", "function", ".", "endpoints", ":", "# Function should have at least one endpoint", "continue", "endpoint_addr", "=", "max", "(", "[", "a", ".", "addr", "for", "a", "in", "function", ".", "endpoints", "]", ")", "the_endpoint", "=", "next", "(", "a", "for", "a", "in", "function", ".", "endpoints", "if", "a", ".", "addr", "==", "endpoint_addr", ")", "endpoint_addr", "+=", "the_endpoint", ".", "size", "# sanity check: startpoint of the function should be greater than its endpoint", "if", "startpoint_addr", ">=", "endpoint_addr", ":", "continue", "if", "max_unresolved_jump_addr", "<=", "startpoint_addr", "or", "max_unresolved_jump_addr", ">=", "endpoint_addr", ":", "continue", "# scan forward from the endpoint to include any function tail jumps", "# Here is an example:", "# loc_8049562:", "# mov eax, ebp", "# add esp, 3ch", "# ...", "# ret", "# loc_804956c:", "# mov ebp, 3", "# jmp loc_8049562", "# loc_8049573:", "# mov ebp, 4", "# jmp loc_8049562", "#", "last_addr", "=", "endpoint_addr", "tmp_state", "=", "self", ".", "project", ".", "factory", ".", "blank_state", "(", "mode", "=", "'fastpath'", ")", "while", "True", ":", "try", ":", "# using successors is slow, but acceptable since we won't be creating millions of blocks here...", "tmp_state", ".", "ip", "=", "last_addr", "b", "=", "self", ".", "project", ".", "factory", ".", "successors", "(", "tmp_state", ",", "jumpkind", "=", "'Ijk_Boring'", ")", "if", "len", "(", "b", ".", "successors", ")", "!=", "1", ":", "break", "if", "b", ".", "successors", "[", "0", "]", ".", "history", ".", "jumpkind", "not", "in", "(", "'Ijk_Boring'", ",", "'Ijk_InvalICache'", ")", ":", "break", "if", "b", ".", "successors", "[", "0", "]", ".", "ip", ".", "symbolic", ":", "break", "suc_addr", "=", "b", ".", "successors", "[", "0", "]", ".", "ip", ".", "_model_concrete", "if", "max", "(", "startpoint_addr", ",", "the_endpoint", ".", "addr", "-", "0x40", ")", "<=", "suc_addr", "<", "the_endpoint", ".", "addr", "+", "the_endpoint", ".", "size", ":", "# increment the 
endpoint_addr", "endpoint_addr", "=", "b", ".", "addr", "+", "b", ".", "artifacts", "[", "'irsb_size'", "]", "else", ":", "break", "last_addr", "=", "b", ".", "addr", "+", "b", ".", "artifacts", "[", "'irsb_size'", "]", "except", "(", "SimTranslationError", ",", "SimMemoryError", ",", "SimIRSBError", ",", "SimEngineError", ")", ":", "break", "# find all functions that are between [ startpoint, endpoint ]", "should_merge", "=", "True", "functions_to_merge", "=", "set", "(", ")", "for", "f_addr", "in", "functions_can_be_removed", ":", "f", "=", "functions", "[", "f_addr", "]", "if", "f_addr", "==", "func_addr", ":", "continue", "if", "max_unresolved_jump_addr", "<", "f_addr", "<", "endpoint_addr", "and", "all", "(", "[", "max_unresolved_jump_addr", "<", "b_addr", "<", "endpoint_addr", "for", "b_addr", "in", "f", ".", "block_addrs", "]", ")", ":", "if", "f_addr", "in", "functions_to_remove", ":", "# this function has already been merged with other functions before... it cannot be merged with", "# this function anymore", "should_merge", "=", "False", "break", "if", "f_addr", "in", "predetermined_function_addrs", ":", "# this function is a legit one. it shouldn't be removed/merged", "should_merge", "=", "False", "break", "functions_to_merge", ".", "add", "(", "f_addr", ")", "if", "not", "should_merge", ":", "# we shouldn't merge...", "continue", "for", "f_addr", "in", "functions_to_merge", ":", "functions_to_remove", "[", "f_addr", "]", "=", "func_addr", "# merge all functions", "for", "to_remove", ",", "merge_with", "in", "functions_to_remove", ".", "items", "(", ")", ":", "func_merge_with", "=", "self", ".", "_addr_to_function", "(", "merge_with", ",", "blockaddr_to_function", ",", "functions", ")", "for", "block_addr", "in", "functions", "[", "to_remove", "]", ".", "block_addrs", ":", "blockaddr_to_function", "[", "block_addr", "]", "=", "func_merge_with", "del", "functions", "[", "to_remove", "]", "return", "set", "(", "functions_to_remove", ".", "keys", "(", ")", ")" ]
For unresolveable indirect jumps, angr marks those jump targets as individual functions. For example, usually the following pattern is seen: sub_0x400010: push ebp mov esp, ebp ... cmp eax, 10 ja end mov eax, jumptable[eax] jmp eax sub_0x400080: # do something here jmp end end (0x400e00): pop ebp ret In the example above, `process_irrational_functions` will remove function 0x400080, and merge it with function 0x400010. :param angr.knowledge_plugins.FunctionManager functions: all functions that angr recovers, including those ones that are misidentified as functions. :param dict blockaddr_to_function: A mapping between block addresses and Function instances. :return: A set of addresses of all removed functions :rtype: set
[ "For", "unresolveable", "indirect", "jumps", "angr", "marks", "those", "jump", "targets", "as", "individual", "functions", ".", "For", "example", "usually", "the", "following", "pattern", "is", "seen", ":" ]
python
train
41.368421
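The record above documents an internal angr heuristic rather than a public API, so the following is only a self-contained sketch (plain dictionaries and integers, no angr) of the containment test it applies: a "function" whose blocks all lie strictly between an unresolved indirect jump and the enclosing function's endpoint is a merge candidate, unless it was predetermined to be a real function. The toy address layout is hypothetical.

def find_merge_candidates(functions, jump_addr, endpoint_addr, predetermined=frozenset()):
    # functions: start address -> set of block addresses (toy stand-in for angr's objects)
    candidates = set()
    for start, blocks in functions.items():
        if start in predetermined:
            continue  # legit, predetermined functions are never merged away
        if jump_addr < start < endpoint_addr and all(jump_addr < b < endpoint_addr for b in blocks):
            candidates.add(start)
    return candidates

# Toy layout mirroring the docstring example: sub_0x400080 lies between the
# unresolved jump inside sub_0x400010 and that function's end at 0x400e00.
funcs = {0x400010: {0x400010, 0x400030}, 0x400080: {0x400080}}
print(sorted(hex(a) for a in find_merge_candidates(funcs, 0x400030, 0x400e00)))  # ['0x400080']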
jeffknupp/sandman2
sandman2/app.py
https://github.com/jeffknupp/sandman2/blob/1ce21d6f7a6df77fa96fab694b0f9bb8469c166b/sandman2/app.py#L176-L187
def _register_user_models(user_models, admin=None, schema=None): """Register any user-defined models with the API Service. :param list user_models: A list of user-defined models to include in the API service """ if any([issubclass(cls, AutomapModel) for cls in user_models]): AutomapModel.prepare( # pylint:disable=maybe-no-member db.engine, reflect=True, schema=schema) for user_model in user_models: register_model(user_model, admin)
[ "def", "_register_user_models", "(", "user_models", ",", "admin", "=", "None", ",", "schema", "=", "None", ")", ":", "if", "any", "(", "[", "issubclass", "(", "cls", ",", "AutomapModel", ")", "for", "cls", "in", "user_models", "]", ")", ":", "AutomapModel", ".", "prepare", "(", "# pylint:disable=maybe-no-member", "db", ".", "engine", ",", "reflect", "=", "True", ",", "schema", "=", "schema", ")", "for", "user_model", "in", "user_models", ":", "register_model", "(", "user_model", ",", "admin", ")" ]
Register any user-defined models with the API Service. :param list user_models: A list of user-defined models to include in the API service
[ "Register", "any", "user", "-", "defined", "models", "with", "the", "API", "Service", "." ]
python
train
43.583333
inasafe/inasafe
safe/gis/vector/prepare_vector_layer.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/vector/prepare_vector_layer.py#L451-L503
def sum_fields(layer, output_field_key, input_fields): """Sum the value of input_fields and put it as output_field. :param layer: The vector layer. :type layer: QgsVectorLayer :param output_field_key: The output field definition key. :type output_field_key: basestring :param input_fields: List of input fields' name. :type input_fields: list """ field_definition = definition(output_field_key) output_field_name = field_definition['field_name'] # If the fields only has one element if len(input_fields) == 1: # Name is different, copy it if input_fields[0] != output_field_name: to_rename = {input_fields[0]: output_field_name} # We copy only, it will be deleted later. # We can't rename the field, we need to copy it as the same # field might be used many times in the FMT tool. copy_fields(layer, to_rename) else: # Name is same, do nothing return else: # Creating expression # Put field name in a double quote. See #4248 input_fields = ['"%s"' % f for f in input_fields] string_expression = ' + '.join(input_fields) sum_expression = QgsExpression(string_expression) context = QgsExpressionContext() context.setFields(layer.fields()) sum_expression.prepare(context) # Get the output field index output_idx = layer.fields().lookupField(output_field_name) # Output index is not found layer.startEditing() if output_idx == -1: output_field = create_field_from_definition(field_definition) layer.addAttribute(output_field) output_idx = layer.fields().lookupField(output_field_name) # Iterate to all features for feature in layer.getFeatures(): context.setFeature(feature) result = sum_expression.evaluate(context) feature[output_idx] = result layer.updateFeature(feature) layer.commitChanges()
[ "def", "sum_fields", "(", "layer", ",", "output_field_key", ",", "input_fields", ")", ":", "field_definition", "=", "definition", "(", "output_field_key", ")", "output_field_name", "=", "field_definition", "[", "'field_name'", "]", "# If the fields only has one element", "if", "len", "(", "input_fields", ")", "==", "1", ":", "# Name is different, copy it", "if", "input_fields", "[", "0", "]", "!=", "output_field_name", ":", "to_rename", "=", "{", "input_fields", "[", "0", "]", ":", "output_field_name", "}", "# We copy only, it will be deleted later.", "# We can't rename the field, we need to copy it as the same", "# field might be used many times in the FMT tool.", "copy_fields", "(", "layer", ",", "to_rename", ")", "else", ":", "# Name is same, do nothing", "return", "else", ":", "# Creating expression", "# Put field name in a double quote. See #4248", "input_fields", "=", "[", "'\"%s\"'", "%", "f", "for", "f", "in", "input_fields", "]", "string_expression", "=", "' + '", ".", "join", "(", "input_fields", ")", "sum_expression", "=", "QgsExpression", "(", "string_expression", ")", "context", "=", "QgsExpressionContext", "(", ")", "context", ".", "setFields", "(", "layer", ".", "fields", "(", ")", ")", "sum_expression", ".", "prepare", "(", "context", ")", "# Get the output field index", "output_idx", "=", "layer", ".", "fields", "(", ")", ".", "lookupField", "(", "output_field_name", ")", "# Output index is not found", "layer", ".", "startEditing", "(", ")", "if", "output_idx", "==", "-", "1", ":", "output_field", "=", "create_field_from_definition", "(", "field_definition", ")", "layer", ".", "addAttribute", "(", "output_field", ")", "output_idx", "=", "layer", ".", "fields", "(", ")", ".", "lookupField", "(", "output_field_name", ")", "# Iterate to all features", "for", "feature", "in", "layer", ".", "getFeatures", "(", ")", ":", "context", ".", "setFeature", "(", "feature", ")", "result", "=", "sum_expression", ".", "evaluate", "(", "context", ")", "feature", "[", "output_idx", "]", "=", "result", "layer", ".", "updateFeature", "(", "feature", ")", "layer", ".", "commitChanges", "(", ")" ]
Sum the values of input_fields and put the result in output_field.

:param layer: The vector layer.
:type layer: QgsVectorLayer

:param output_field_key: The output field definition key.
:type output_field_key: basestring

:param input_fields: List of input field names.
:type input_fields: list
[ "Sum", "the", "value", "of", "input_fields", "and", "put", "it", "as", "output_field", "." ]
python
train
38.075472
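The record depends on QGIS classes, so here is only a small QGIS-free sketch of the expression-building step it performs: field names are double-quoted and joined with " + ", and the same sum is then evaluated over plain dictionaries standing in for layer features. The field names are made up.

def build_sum_expression(input_fields):
    quoted = ['"%s"' % f for f in input_fields]  # quote each field name
    return ' + '.join(quoted)

print(build_sum_expression(['pop_male', 'pop_female']))  # "pop_male" + "pop_female"

# Evaluating the equivalent sum over plain dicts used as stand-ins for layer features:
features = [{'pop_male': 10, 'pop_female': 12}, {'pop_male': 3, 'pop_female': 4}]
print([sum(f[name] for name in ['pop_male', 'pop_female']) for f in features])  # [22, 7]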
gnarlychicken/aiohttp_auth
aiohttp_auth/acl/decorators.py
https://github.com/gnarlychicken/aiohttp_auth/blob/3d55236889fb14b662279b050de18d43842bb886/aiohttp_auth/acl/decorators.py#L6-L44
def acl_required(permission, context): """Returns a decorator that checks if a user has the requested permission from the passed acl context. This function constructs a decorator that can be used to check a aiohttp's view for authorization before calling it. It uses the get_permission() function to check the request against the passed permission and context. If the user does not have the correct permission to run this function, it raises HTTPForbidden. Args: permission: The specific permission requested. context: Either a sequence of ACL tuples, or a callable that returns a sequence of ACL tuples. For more information on ACL tuples, see get_permission() Returns: A decorator which will check the request passed has the permission for the given context. The decorator will raise HTTPForbidden if the user does not have the correct permissions to access the view. """ def decorator(func): @wraps(func) async def wrapper(*args): request = args[-1] if callable(context): context = context() if await get_permitted(request, permission, context): return await func(*args) raise web.HTTPForbidden() return wrapper return decorator
[ "def", "acl_required", "(", "permission", ",", "context", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "async", "def", "wrapper", "(", "*", "args", ")", ":", "request", "=", "args", "[", "-", "1", "]", "if", "callable", "(", "context", ")", ":", "context", "=", "context", "(", ")", "if", "await", "get_permitted", "(", "request", ",", "permission", ",", "context", ")", ":", "return", "await", "func", "(", "*", "args", ")", "raise", "web", ".", "HTTPForbidden", "(", ")", "return", "wrapper", "return", "decorator" ]
Returns a decorator that checks if a user has the requested permission from the passed acl context. This function constructs a decorator that can be used to check a aiohttp's view for authorization before calling it. It uses the get_permission() function to check the request against the passed permission and context. If the user does not have the correct permission to run this function, it raises HTTPForbidden. Args: permission: The specific permission requested. context: Either a sequence of ACL tuples, or a callable that returns a sequence of ACL tuples. For more information on ACL tuples, see get_permission() Returns: A decorator which will check the request passed has the permission for the given context. The decorator will raise HTTPForbidden if the user does not have the correct permissions to access the view.
[ "Returns", "a", "decorator", "that", "checks", "if", "a", "user", "has", "the", "requested", "permission", "from", "the", "passed", "acl", "context", "." ]
python
train
33.820513
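A framework-free sketch of the same decorator shape, using a hypothetical get_permitted coroutine and a plain Forbidden exception in place of aiohttp's web.HTTPForbidden; the resolved context is bound to a new local name so the closed-over parameter is not rebound inside the wrapper.

import asyncio
from functools import wraps

class Forbidden(Exception):
    pass

async def get_permitted(request, permission, context):
    # hypothetical stand-in: context is a list of (permission, user) pairs
    return (permission, request.get('user')) in context

def acl_required(permission, context):
    def decorator(func):
        @wraps(func)
        async def wrapper(*args):
            request = args[-1]
            ctx = context() if callable(context) else context  # resolve lazily if needed
            if await get_permitted(request, permission, ctx):
                return await func(*args)
            raise Forbidden()
        return wrapper
    return decorator

@acl_required('view', [('view', 'alice')])
async def view_page(request):
    return 'ok'

print(asyncio.run(view_page({'user': 'alice'})))  # ok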
nicolargo/glances
glances/config.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/config.py#L33-L51
def user_config_dir(): r"""Return the per-user config dir (full path). - Linux, *BSD, SunOS: ~/.config/glances - macOS: ~/Library/Application Support/glances - Windows: %APPDATA%\glances """ if WINDOWS: path = os.environ.get('APPDATA') elif MACOS: path = os.path.expanduser('~/Library/Application Support') else: path = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config') if path is None: path = '' else: path = os.path.join(path, 'glances') return path
[ "def", "user_config_dir", "(", ")", ":", "if", "WINDOWS", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'APPDATA'", ")", "elif", "MACOS", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "'~/Library/Application Support'", ")", "else", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "'XDG_CONFIG_HOME'", ")", "or", "os", ".", "path", ".", "expanduser", "(", "'~/.config'", ")", "if", "path", "is", "None", ":", "path", "=", "''", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'glances'", ")", "return", "path" ]
r"""Return the per-user config dir (full path). - Linux, *BSD, SunOS: ~/.config/glances - macOS: ~/Library/Application Support/glances - Windows: %APPDATA%\glances
[ "r", "Return", "the", "per", "-", "user", "config", "dir", "(", "full", "path", ")", "." ]
python
train
28.315789
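A standard-library-only re-implementation sketch of the same per-platform lookup; it uses sys.platform instead of the WINDOWS/MACOS constants referenced by the record, and the application name is parameterised.

import os
import sys

def user_config_dir(appname='glances'):
    if sys.platform.startswith('win'):
        base = os.environ.get('APPDATA')
    elif sys.platform == 'darwin':
        base = os.path.expanduser('~/Library/Application Support')
    else:
        base = os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')
    return '' if base is None else os.path.join(base, appname)

print(user_config_dir())  # e.g. /home/user/.config/glances on Linux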
MoseleyBioinformaticsLab/filehandles
filehandles/filehandles.py
https://github.com/MoseleyBioinformaticsLab/filehandles/blob/dd09354a2f12c315fb5c6fa5d6919e1d7ae3e076/filehandles/filehandles.py#L135-L153
def ziparchive_opener(path, pattern='', verbose=False): """Opener that opens files from zip archive.. :param str path: Path. :param str pattern: Regular expression pattern. :return: Filehandle(s). """ with zipfile.ZipFile(io.BytesIO(urlopen(path).read()), 'r') if is_url(path) else zipfile.ZipFile(path, 'r') as ziparchive: for zipinfo in ziparchive.infolist(): if not zipinfo.filename.endswith('/'): source = os.path.join(path, zipinfo.filename) if pattern and not re.match(pattern, zipinfo.filename): logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(zipinfo.filename), pattern)) continue logger.verbose('Processing file: {}'.format(source)) filehandle = ziparchive.open(zipinfo) yield filehandle
[ "def", "ziparchive_opener", "(", "path", ",", "pattern", "=", "''", ",", "verbose", "=", "False", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "io", ".", "BytesIO", "(", "urlopen", "(", "path", ")", ".", "read", "(", ")", ")", ",", "'r'", ")", "if", "is_url", "(", "path", ")", "else", "zipfile", ".", "ZipFile", "(", "path", ",", "'r'", ")", "as", "ziparchive", ":", "for", "zipinfo", "in", "ziparchive", ".", "infolist", "(", ")", ":", "if", "not", "zipinfo", ".", "filename", ".", "endswith", "(", "'/'", ")", ":", "source", "=", "os", ".", "path", ".", "join", "(", "path", ",", "zipinfo", ".", "filename", ")", "if", "pattern", "and", "not", "re", ".", "match", "(", "pattern", ",", "zipinfo", ".", "filename", ")", ":", "logger", ".", "verbose", "(", "'Skipping file: {}, did not match regex pattern \"{}\"'", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "zipinfo", ".", "filename", ")", ",", "pattern", ")", ")", "continue", "logger", ".", "verbose", "(", "'Processing file: {}'", ".", "format", "(", "source", ")", ")", "filehandle", "=", "ziparchive", ".", "open", "(", "zipinfo", ")", "yield", "filehandle" ]
Opener that opens files from a zip archive.

:param str path: Path.
:param str pattern: Regular expression pattern.
:return: Filehandle(s).
[ "Opener", "that", "opens", "files", "from", "zip", "archive", ".." ]
python
train
46.789474
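A simplified, local-file-only variant built on the standard library alone: walk a zip archive's members, skip directory entries, filter by an optional regex, and yield open file handles. The URL handling and verbose logging of the record are omitted, and the archive name in the usage comment is made up.

import re
import zipfile

def ziparchive_opener(path, pattern=''):
    with zipfile.ZipFile(path, 'r') as archive:
        for info in archive.infolist():
            if info.filename.endswith('/'):
                continue  # skip directory entries
            if pattern and not re.match(pattern, info.filename):
                continue  # skip members that do not match the filter
            with archive.open(info) as handle:
                yield handle

# for handle in ziparchive_opener('data.zip', pattern=r'.*\.txt$'):
#     print(handle.read()[:40])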
meejah/txtorcon
txtorcon/torconfig.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/torconfig.py#L672-L725
def create_socks_endpoint(self, reactor, socks_config): """ Creates a new TorSocksEndpoint instance given a valid configuration line for ``SocksPort``; if this configuration isn't already in the underlying tor, we add it. Note that this method may call :meth:`txtorcon.TorConfig.save()` on this instance. Note that calling this with `socks_config=None` is equivalent to calling `.socks_endpoint` (which is not async). XXX socks_config should be .. i dunno, but there's fucking options and craziness, e.g. default Tor Browser Bundle is: ['9150 IPv6Traffic PreferIPv6 KeepAliveIsolateSOCKSAuth', '9155'] XXX maybe we should say "socks_port" as the 3rd arg, insist it's an int, and then allow/support all the other options (e.g. via kwargs) XXX we could avoid the "maybe call .save()" thing; worth it? (actually, no we can't or the Tor won't have it config'd) """ yield self.post_bootstrap if socks_config is None: if len(self.SocksPort) == 0: raise RuntimeError( "socks_port is None and Tor has no SocksPorts configured" ) socks_config = self.SocksPort[0] else: if not any([socks_config in port for port in self.SocksPort]): # need to configure Tor self.SocksPort.append(socks_config) try: yield self.save() except TorProtocolError as e: extra = '' if socks_config.startswith('unix:'): # XXX so why don't we check this for the # caller, earlier on? extra = '\nNote Tor has specific ownership/permissions ' +\ 'requirements for unix sockets and parent dir.' raise RuntimeError( "While configuring SOCKSPort to '{}', error from" " Tor: {}{}".format( socks_config, e, extra ) ) defer.returnValue( _endpoint_from_socksport_line(reactor, socks_config) )
[ "def", "create_socks_endpoint", "(", "self", ",", "reactor", ",", "socks_config", ")", ":", "yield", "self", ".", "post_bootstrap", "if", "socks_config", "is", "None", ":", "if", "len", "(", "self", ".", "SocksPort", ")", "==", "0", ":", "raise", "RuntimeError", "(", "\"socks_port is None and Tor has no SocksPorts configured\"", ")", "socks_config", "=", "self", ".", "SocksPort", "[", "0", "]", "else", ":", "if", "not", "any", "(", "[", "socks_config", "in", "port", "for", "port", "in", "self", ".", "SocksPort", "]", ")", ":", "# need to configure Tor", "self", ".", "SocksPort", ".", "append", "(", "socks_config", ")", "try", ":", "yield", "self", ".", "save", "(", ")", "except", "TorProtocolError", "as", "e", ":", "extra", "=", "''", "if", "socks_config", ".", "startswith", "(", "'unix:'", ")", ":", "# XXX so why don't we check this for the", "# caller, earlier on?", "extra", "=", "'\\nNote Tor has specific ownership/permissions '", "+", "'requirements for unix sockets and parent dir.'", "raise", "RuntimeError", "(", "\"While configuring SOCKSPort to '{}', error from\"", "\" Tor: {}{}\"", ".", "format", "(", "socks_config", ",", "e", ",", "extra", ")", ")", "defer", ".", "returnValue", "(", "_endpoint_from_socksport_line", "(", "reactor", ",", "socks_config", ")", ")" ]
Creates a new TorSocksEndpoint instance given a valid configuration line for ``SocksPort``; if this configuration isn't already in the underlying tor, we add it. Note that this method may call :meth:`txtorcon.TorConfig.save()` on this instance. Note that calling this with `socks_config=None` is equivalent to calling `.socks_endpoint` (which is not async). XXX socks_config should be .. i dunno, but there's fucking options and craziness, e.g. default Tor Browser Bundle is: ['9150 IPv6Traffic PreferIPv6 KeepAliveIsolateSOCKSAuth', '9155'] XXX maybe we should say "socks_port" as the 3rd arg, insist it's an int, and then allow/support all the other options (e.g. via kwargs) XXX we could avoid the "maybe call .save()" thing; worth it? (actually, no we can't or the Tor won't have it config'd)
[ "Creates", "a", "new", "TorSocksEndpoint", "instance", "given", "a", "valid", "configuration", "line", "for", "SocksPort", ";", "if", "this", "configuration", "isn", "t", "already", "in", "the", "underlying", "tor", "we", "add", "it", ".", "Note", "that", "this", "method", "may", "call", ":", "meth", ":", "txtorcon", ".", "TorConfig", ".", "save", "()", "on", "this", "instance", "." ]
python
train
41.555556
jtwhite79/pyemu
pyemu/mat/mat_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/mat/mat_handler.py#L753-L791
def __set_svd(self): """private method to set SVD components. Note: this should not be called directly """ if self.isdiagonal: x = np.diag(self.x.flatten()) else: # just a pointer to x x = self.x try: u, s, v = la.svd(x, full_matrices=True) v = v.transpose() except Exception as e: print("standard SVD failed: {0}".format(str(e))) try: v, s, u = la.svd(x.transpose(), full_matrices=True) u = u.transpose() except Exception as e: np.savetxt("failed_svd.dat",x,fmt="%15.6E") raise Exception("Matrix.__set_svd(): " + "unable to compute SVD of self.x, " + "saved matrix to 'failed_svd.dat' -- {0}".\ format(str(e))) col_names = ["left_sing_vec_" + str(i + 1) for i in range(u.shape[1])] self.__u = Matrix(x=u, row_names=self.row_names, col_names=col_names, autoalign=False) sing_names = ["sing_val_" + str(i + 1) for i in range(s.shape[0])] self.__s = Matrix(x=np.atleast_2d(s).transpose(), row_names=sing_names, col_names=sing_names, isdiagonal=True, autoalign=False) col_names = ["right_sing_vec_" + str(i + 1) for i in range(v.shape[0])] self.__v = Matrix(v, row_names=self.col_names, col_names=col_names, autoalign=False)
[ "def", "__set_svd", "(", "self", ")", ":", "if", "self", ".", "isdiagonal", ":", "x", "=", "np", ".", "diag", "(", "self", ".", "x", ".", "flatten", "(", ")", ")", "else", ":", "# just a pointer to x", "x", "=", "self", ".", "x", "try", ":", "u", ",", "s", ",", "v", "=", "la", ".", "svd", "(", "x", ",", "full_matrices", "=", "True", ")", "v", "=", "v", ".", "transpose", "(", ")", "except", "Exception", "as", "e", ":", "print", "(", "\"standard SVD failed: {0}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "try", ":", "v", ",", "s", ",", "u", "=", "la", ".", "svd", "(", "x", ".", "transpose", "(", ")", ",", "full_matrices", "=", "True", ")", "u", "=", "u", ".", "transpose", "(", ")", "except", "Exception", "as", "e", ":", "np", ".", "savetxt", "(", "\"failed_svd.dat\"", ",", "x", ",", "fmt", "=", "\"%15.6E\"", ")", "raise", "Exception", "(", "\"Matrix.__set_svd(): \"", "+", "\"unable to compute SVD of self.x, \"", "+", "\"saved matrix to 'failed_svd.dat' -- {0}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "col_names", "=", "[", "\"left_sing_vec_\"", "+", "str", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "u", ".", "shape", "[", "1", "]", ")", "]", "self", ".", "__u", "=", "Matrix", "(", "x", "=", "u", ",", "row_names", "=", "self", ".", "row_names", ",", "col_names", "=", "col_names", ",", "autoalign", "=", "False", ")", "sing_names", "=", "[", "\"sing_val_\"", "+", "str", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "s", ".", "shape", "[", "0", "]", ")", "]", "self", ".", "__s", "=", "Matrix", "(", "x", "=", "np", ".", "atleast_2d", "(", "s", ")", ".", "transpose", "(", ")", ",", "row_names", "=", "sing_names", ",", "col_names", "=", "sing_names", ",", "isdiagonal", "=", "True", ",", "autoalign", "=", "False", ")", "col_names", "=", "[", "\"right_sing_vec_\"", "+", "str", "(", "i", "+", "1", ")", "for", "i", "in", "range", "(", "v", ".", "shape", "[", "0", "]", ")", "]", "self", ".", "__v", "=", "Matrix", "(", "v", ",", "row_names", "=", "self", ".", "col_names", ",", "col_names", "=", "col_names", ",", "autoalign", "=", "False", ")" ]
private method to set SVD components. Note: this should not be called directly
[ "private", "method", "to", "set", "SVD", "components", "." ]
python
train
39.820513
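A NumPy-only sketch of the fallback strategy the record describes: attempt the SVD of x directly and, if it fails to converge, factor x.T instead and transpose/swap the factors so that x == u @ diag(s) @ v.T still holds. The Matrix wrapper and row/column naming are omitted.

import numpy as np

def svd_with_fallback(x):
    try:
        u, s, vh = np.linalg.svd(x, full_matrices=True)
        v = vh.T
    except np.linalg.LinAlgError:
        # factor the transpose and swap the roles of the singular vectors
        v, s, uh = np.linalg.svd(x.T, full_matrices=True)
        u = uh.T
    return u, s, v

x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
u, s, v = svd_with_fallback(x)
print(np.allclose(u[:, :s.size] @ np.diag(s) @ v.T[:s.size, :], x))  # True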
saltstack/salt
salt/returners/sqlite3_return.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/sqlite3_return.py#L220-L237
def get_jid(jid): ''' Return the information returned from a specified jid ''' log.debug('sqlite3 returner <get_jid> called jid: %s', jid) conn = _get_conn(ret=None) cur = conn.cursor() sql = '''SELECT id, full_ret FROM salt_returns WHERE jid = :jid''' cur.execute(sql, {'jid': jid}) data = cur.fetchone() log.debug('query result: %s', data) ret = {} if data and len(data) > 1: ret = {six.text_type(data[0]): {'return': salt.utils.json.loads(data[1])}} log.debug('ret: %s', ret) _close_conn(conn) return ret
[ "def", "get_jid", "(", "jid", ")", ":", "log", ".", "debug", "(", "'sqlite3 returner <get_jid> called jid: %s'", ",", "jid", ")", "conn", "=", "_get_conn", "(", "ret", "=", "None", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "sql", "=", "'''SELECT id, full_ret FROM salt_returns WHERE jid = :jid'''", "cur", ".", "execute", "(", "sql", ",", "{", "'jid'", ":", "jid", "}", ")", "data", "=", "cur", ".", "fetchone", "(", ")", "log", ".", "debug", "(", "'query result: %s'", ",", "data", ")", "ret", "=", "{", "}", "if", "data", "and", "len", "(", "data", ")", ">", "1", ":", "ret", "=", "{", "six", ".", "text_type", "(", "data", "[", "0", "]", ")", ":", "{", "'return'", ":", "salt", ".", "utils", ".", "json", ".", "loads", "(", "data", "[", "1", "]", ")", "}", "}", "log", ".", "debug", "(", "'ret: %s'", ",", "ret", ")", "_close_conn", "(", "conn", ")", "return", "ret" ]
Return the information returned from a specified jid
[ "Return", "the", "information", "returned", "from", "a", "specified", "jid" ]
python
train
32.111111
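A self-contained sqlite3 sketch of the same query shape, using an in-memory database and the standard json module in place of Salt's helpers; the table layout mirrors the record but the inserted row is made up.

import json
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE salt_returns (id TEXT, jid TEXT, full_ret TEXT)')
conn.execute('INSERT INTO salt_returns VALUES (?, ?, ?)',
             ('minion1', '20240101120000000000', json.dumps({'retcode': 0})))

def get_jid(jid):
    cur = conn.cursor()
    cur.execute('SELECT id, full_ret FROM salt_returns WHERE jid = :jid', {'jid': jid})
    data = cur.fetchone()
    if data and len(data) > 1:
        return {data[0]: {'return': json.loads(data[1])}}
    return {}

print(get_jid('20240101120000000000'))  # {'minion1': {'return': {'retcode': 0}}}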
Crunch-io/crunch-cube
src/cr/cube/dimension.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/dimension.py#L209-L225
def _resolve_array_type(self): """Return one of the ARRAY_TYPES members of DIMENSION_TYPE. This method distinguishes between CA and MR dimensions. The return value is only meaningful if the dimension is known to be of array type (i.e. either CA or MR, base-type 'enum.variable'). """ next_raw_dimension = self._next_raw_dimension if next_raw_dimension is None: return DT.CA is_mr_subvar = ( next_raw_dimension._base_type == "categorical" and next_raw_dimension._has_selected_category and next_raw_dimension._alias == self._alias ) return DT.MR if is_mr_subvar else DT.CA
[ "def", "_resolve_array_type", "(", "self", ")", ":", "next_raw_dimension", "=", "self", ".", "_next_raw_dimension", "if", "next_raw_dimension", "is", "None", ":", "return", "DT", ".", "CA", "is_mr_subvar", "=", "(", "next_raw_dimension", ".", "_base_type", "==", "\"categorical\"", "and", "next_raw_dimension", ".", "_has_selected_category", "and", "next_raw_dimension", ".", "_alias", "==", "self", ".", "_alias", ")", "return", "DT", ".", "MR", "if", "is_mr_subvar", "else", "DT", ".", "CA" ]
Return one of the ARRAY_TYPES members of DIMENSION_TYPE. This method distinguishes between CA and MR dimensions. The return value is only meaningful if the dimension is known to be of array type (i.e. either CA or MR, base-type 'enum.variable').
[ "Return", "one", "of", "the", "ARRAY_TYPES", "members", "of", "DIMENSION_TYPE", "." ]
python
train
40.235294
MacHu-GWU/crawlib-project
crawlib/util.py
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/util.py#L63-L77
def add_params(endpoint, params): """ Combine query endpoint and params. Example:: >>> add_params("https://www.google.com/search", {"q": "iphone"}) https://www.google.com/search?q=iphone """ p = PreparedRequest() p.prepare(url=endpoint, params=params) if PY2: # pragma: no cover return unicode(p.url) else: # pragma: no cover return p.url
[ "def", "add_params", "(", "endpoint", ",", "params", ")", ":", "p", "=", "PreparedRequest", "(", ")", "p", ".", "prepare", "(", "url", "=", "endpoint", ",", "params", "=", "params", ")", "if", "PY2", ":", "# pragma: no cover", "return", "unicode", "(", "p", ".", "url", ")", "else", ":", "# pragma: no cover", "return", "p", ".", "url" ]
Combine query endpoint and params. Example:: >>> add_params("https://www.google.com/search", {"q": "iphone"}) https://www.google.com/search?q=iphone
[ "Combine", "query", "endpoint", "and", "params", "." ]
python
train
26.133333
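A short usage sketch: requests' PreparedRequest does the URL/query-string merging, so parameters already present in the endpoint are kept and the new ones are percent-encoded. The Python 2 branch of the record is dropped here.

from requests.models import PreparedRequest

def add_params(endpoint, params):
    p = PreparedRequest()
    p.prepare(url=endpoint, params=params)
    return p.url

print(add_params('https://www.google.com/search', {'q': 'iphone'}))
# https://www.google.com/search?q=iphone
print(add_params('https://example.com/api?page=2', {'q': 'space bar'}))
# https://example.com/api?page=2&q=space+bar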
contentful-labs/contentful.py
contentful/cda/client.py
https://github.com/contentful-labs/contentful.py/blob/d9eb4a68abcad33e4766e2be8c7b35e605210b5a/contentful/cda/client.py#L274-L281
def first(self): """Attempt to retrieve only the first resource matching this request. :return: Result instance, or `None` if there are no matching resources. """ self.params['limit'] = 1 result = self.all() return result.items[0] if result.total > 0 else None
[ "def", "first", "(", "self", ")", ":", "self", ".", "params", "[", "'limit'", "]", "=", "1", "result", "=", "self", ".", "all", "(", ")", "return", "result", ".", "items", "[", "0", "]", "if", "result", ".", "total", ">", "0", "else", "None" ]
Attempt to retrieve only the first resource matching this request. :return: Result instance, or `None` if there are no matching resources.
[ "Attempt", "to", "retrieve", "only", "the", "first", "resource", "matching", "this", "request", "." ]
python
train
37.75
ejeschke/ginga
ginga/BaseImage.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L496-L540
def get_pixels_on_line(self, x1, y1, x2, y2, getvalues=True): """Uses Bresenham's line algorithm to enumerate the pixels along a line. (see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm) If `getvalues`==False then it will return tuples of (x, y) coordinates instead of pixel values. """ # NOTE: seems to be necessary or we get a non-terminating result x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) dx = abs(x2 - x1) dy = abs(y2 - y1) if x1 < x2: sx = 1 else: sx = -1 if y1 < y2: sy = 1 else: sy = -1 err = dx - dy res = [] x, y = x1, y1 while True: if getvalues: try: val = self.get_data_xy(x, y) except Exception: val = np.NaN res.append(val) else: res.append((x, y)) if (x == x2) and (y == y2): break e2 = 2 * err if e2 > -dy: err = err - dy x += sx if e2 < dx: err = err + dx y += sy return res
[ "def", "get_pixels_on_line", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "getvalues", "=", "True", ")", ":", "# NOTE: seems to be necessary or we get a non-terminating result", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "int", "(", "x1", ")", ",", "int", "(", "y1", ")", ",", "int", "(", "x2", ")", ",", "int", "(", "y2", ")", "dx", "=", "abs", "(", "x2", "-", "x1", ")", "dy", "=", "abs", "(", "y2", "-", "y1", ")", "if", "x1", "<", "x2", ":", "sx", "=", "1", "else", ":", "sx", "=", "-", "1", "if", "y1", "<", "y2", ":", "sy", "=", "1", "else", ":", "sy", "=", "-", "1", "err", "=", "dx", "-", "dy", "res", "=", "[", "]", "x", ",", "y", "=", "x1", ",", "y1", "while", "True", ":", "if", "getvalues", ":", "try", ":", "val", "=", "self", ".", "get_data_xy", "(", "x", ",", "y", ")", "except", "Exception", ":", "val", "=", "np", ".", "NaN", "res", ".", "append", "(", "val", ")", "else", ":", "res", ".", "append", "(", "(", "x", ",", "y", ")", ")", "if", "(", "x", "==", "x2", ")", "and", "(", "y", "==", "y2", ")", ":", "break", "e2", "=", "2", "*", "err", "if", "e2", ">", "-", "dy", ":", "err", "=", "err", "-", "dy", "x", "+=", "sx", "if", "e2", "<", "dx", ":", "err", "=", "err", "+", "dx", "y", "+=", "sy", "return", "res" ]
Uses Bresenham's line algorithm to enumerate the pixels along a line. (see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm) If `getvalues`==False then it will return tuples of (x, y) coordinates instead of pixel values.
[ "Uses", "Bresenham", "s", "line", "algorithm", "to", "enumerate", "the", "pixels", "along", "a", "line", ".", "(", "see", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Bresenham%27s_line_algorithm", ")" ]
python
train
27.355556
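A standalone version of the coordinate-only path (the getvalues=False case in the record): enumerate the integer (x, y) pixels that Bresenham's algorithm visits between two endpoints.

def pixels_on_line(x1, y1, x2, y2):
    dx, dy = abs(x2 - x1), abs(y2 - y1)
    sx = 1 if x1 < x2 else -1
    sy = 1 if y1 < y2 else -1
    err = dx - dy
    points = []
    x, y = x1, y1
    while True:
        points.append((x, y))
        if (x, y) == (x2, y2):
            break
        e2 = 2 * err
        if e2 > -dy:
            err -= dy
            x += sx
        if e2 < dx:
            err += dx
            y += sy
    return points

print(pixels_on_line(0, 0, 4, 2))
# [(0, 0), (1, 0), (2, 1), (3, 1), (4, 2)]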
CIRCL/IP-ASN-history
client/ipasn_redis/api.py
https://github.com/CIRCL/IP-ASN-history/blob/2e02ced01a08531a007d9cd71547c8248570de1b/client/ipasn_redis/api.py#L77-L89
def asn(self, ip, announce_date=None): """ Give an IP, maybe a date, get the ASN. This is the fastest command. :param ip: IP address to search for :param announce_date: Date of the announcement :rtype: String, ASN. """ assignations, announce_date, _ = self.run(ip, announce_date) return next((assign for assign in assignations if assign is not None), None), announce_date
[ "def", "asn", "(", "self", ",", "ip", ",", "announce_date", "=", "None", ")", ":", "assignations", ",", "announce_date", ",", "_", "=", "self", ".", "run", "(", "ip", ",", "announce_date", ")", "return", "next", "(", "(", "assign", "for", "assign", "in", "assignations", "if", "assign", "is", "not", "None", ")", ",", "None", ")", ",", "announce_date" ]
Give an IP, maybe a date, get the ASN. This is the fastest command. :param ip: IP address to search for :param announce_date: Date of the announcement :rtype: String, ASN.
[ "Give", "an", "IP", "maybe", "a", "date", "get", "the", "ASN", ".", "This", "is", "the", "fastest", "command", "." ]
python
valid
34.923077
acorg/dark-matter
dark/graphics.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/graphics.py#L623-L695
def alignmentPanelHTML(titlesAlignments, sortOn='maxScore', outputDir=None, idList=False, equalizeXAxes=False, xRange='subject', logLinearXAxis=False, logBase=DEFAULT_LOG_LINEAR_X_AXIS_BASE, rankScores=False, showFeatures=True, showOrfs=True): """ Produces an HTML index file in C{outputDir} and a collection of alignment graphs and FASTA files to summarize the information in C{titlesAlignments}. @param titlesAlignments: A L{dark.titles.TitlesAlignments} instance. @param sortOn: The attribute to sort subplots on. Either "maxScore", "medianScore", "readCount", "length", or "title". @param outputDir: Specifies a C{str} directory to write the HTML to. If the directory does not exist it will be created. @param idList: A dictionary. Keys are colors and values are lists of read ids that should be colored using that color. @param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot to be the same. @param xRange: Set to either 'subject' or 'reads' to indicate the range of the X axis. @param logLinearXAxis: If C{True}, convert read offsets so that empty regions in the plots we're preparing will only be as wide as their logged actual values. @param logBase: The logarithm base to use if logLinearXAxis is C{True}. @param: rankScores: If C{True}, change the scores for the reads for each title to be their rank (worst to best). @param showFeatures: If C{True}, look online for features of the subject sequences. @param showOrfs: If C{True}, open reading frames will be displayed. @raise TypeError: If C{outputDir} is C{None}. @raise ValueError: If C{outputDir} is None or exists but is not a directory or if C{xRange} is not "subject" or "reads". """ if xRange not in ('subject', 'reads'): raise ValueError('xRange must be either "subject" or "reads".') if equalizeXAxes: raise NotImplementedError('This feature is not yet implemented.') titles = titlesAlignments.sortTitles(sortOn) if os.access(outputDir, os.F_OK): # outputDir exists. Check it's a directory. if not S_ISDIR(os.stat(outputDir).st_mode): raise ValueError("%r is not a directory." % outputDir) else: if outputDir is None: raise ValueError("The outputDir needs to be specified.") else: os.mkdir(outputDir) htmlWriter = AlignmentPanelHTMLWriter(outputDir, titlesAlignments) for i, title in enumerate(titles): # titleAlignments = titlesAlignments[title] # If we are writing data to a file too, create a separate file with # a plot (this will be linked from the summary HTML). imageBasename = '%d.png' % i imageFile = '%s/%s' % (outputDir, imageBasename) graphInfo = alignmentGraph( titlesAlignments, title, addQueryLines=True, showFeatures=showFeatures, rankScores=rankScores, logLinearXAxis=logLinearXAxis, logBase=logBase, colorQueryBases=False, showFigure=False, imageFile=imageFile, quiet=True, idList=idList, xRange=xRange, showOrfs=showOrfs) # Close the image plot to make sure memory is flushed. plt.close() htmlWriter.addImage(imageBasename, title, graphInfo) htmlWriter.close()
[ "def", "alignmentPanelHTML", "(", "titlesAlignments", ",", "sortOn", "=", "'maxScore'", ",", "outputDir", "=", "None", ",", "idList", "=", "False", ",", "equalizeXAxes", "=", "False", ",", "xRange", "=", "'subject'", ",", "logLinearXAxis", "=", "False", ",", "logBase", "=", "DEFAULT_LOG_LINEAR_X_AXIS_BASE", ",", "rankScores", "=", "False", ",", "showFeatures", "=", "True", ",", "showOrfs", "=", "True", ")", ":", "if", "xRange", "not", "in", "(", "'subject'", ",", "'reads'", ")", ":", "raise", "ValueError", "(", "'xRange must be either \"subject\" or \"reads\".'", ")", "if", "equalizeXAxes", ":", "raise", "NotImplementedError", "(", "'This feature is not yet implemented.'", ")", "titles", "=", "titlesAlignments", ".", "sortTitles", "(", "sortOn", ")", "if", "os", ".", "access", "(", "outputDir", ",", "os", ".", "F_OK", ")", ":", "# outputDir exists. Check it's a directory.", "if", "not", "S_ISDIR", "(", "os", ".", "stat", "(", "outputDir", ")", ".", "st_mode", ")", ":", "raise", "ValueError", "(", "\"%r is not a directory.\"", "%", "outputDir", ")", "else", ":", "if", "outputDir", "is", "None", ":", "raise", "ValueError", "(", "\"The outputDir needs to be specified.\"", ")", "else", ":", "os", ".", "mkdir", "(", "outputDir", ")", "htmlWriter", "=", "AlignmentPanelHTMLWriter", "(", "outputDir", ",", "titlesAlignments", ")", "for", "i", ",", "title", "in", "enumerate", "(", "titles", ")", ":", "# titleAlignments = titlesAlignments[title]", "# If we are writing data to a file too, create a separate file with", "# a plot (this will be linked from the summary HTML).", "imageBasename", "=", "'%d.png'", "%", "i", "imageFile", "=", "'%s/%s'", "%", "(", "outputDir", ",", "imageBasename", ")", "graphInfo", "=", "alignmentGraph", "(", "titlesAlignments", ",", "title", ",", "addQueryLines", "=", "True", ",", "showFeatures", "=", "showFeatures", ",", "rankScores", "=", "rankScores", ",", "logLinearXAxis", "=", "logLinearXAxis", ",", "logBase", "=", "logBase", ",", "colorQueryBases", "=", "False", ",", "showFigure", "=", "False", ",", "imageFile", "=", "imageFile", ",", "quiet", "=", "True", ",", "idList", "=", "idList", ",", "xRange", "=", "xRange", ",", "showOrfs", "=", "showOrfs", ")", "# Close the image plot to make sure memory is flushed.", "plt", ".", "close", "(", ")", "htmlWriter", ".", "addImage", "(", "imageBasename", ",", "title", ",", "graphInfo", ")", "htmlWriter", ".", "close", "(", ")" ]
Produces an HTML index file in C{outputDir} and a collection of alignment graphs and FASTA files to summarize the information in C{titlesAlignments}. @param titlesAlignments: A L{dark.titles.TitlesAlignments} instance. @param sortOn: The attribute to sort subplots on. Either "maxScore", "medianScore", "readCount", "length", or "title". @param outputDir: Specifies a C{str} directory to write the HTML to. If the directory does not exist it will be created. @param idList: A dictionary. Keys are colors and values are lists of read ids that should be colored using that color. @param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot to be the same. @param xRange: Set to either 'subject' or 'reads' to indicate the range of the X axis. @param logLinearXAxis: If C{True}, convert read offsets so that empty regions in the plots we're preparing will only be as wide as their logged actual values. @param logBase: The logarithm base to use if logLinearXAxis is C{True}. @param: rankScores: If C{True}, change the scores for the reads for each title to be their rank (worst to best). @param showFeatures: If C{True}, look online for features of the subject sequences. @param showOrfs: If C{True}, open reading frames will be displayed. @raise TypeError: If C{outputDir} is C{None}. @raise ValueError: If C{outputDir} is None or exists but is not a directory or if C{xRange} is not "subject" or "reads".
[ "Produces", "an", "HTML", "index", "file", "in", "C", "{", "outputDir", "}", "and", "a", "collection", "of", "alignment", "graphs", "and", "FASTA", "files", "to", "summarize", "the", "information", "in", "C", "{", "titlesAlignments", "}", "." ]
python
train
46.506849
PMBio/limix-backup
limix/varDecomp/varianceDecomposition.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L644-L726
def crossValidation(self,seed=0,n_folds=10,fullVector=True,verbose=None,D=None,**keywords): """ Split the dataset in n folds, predict each fold after training the model on all the others Args: seed: seed n_folds: number of folds to train the model on fullVector: Bolean indicator, if true it stops if no convergence is observed for one of the folds, otherwise goes through and returns a pheno matrix with missing values verbose: if true, prints the fold that is being used for predicitons **keywords: params to pass to the function optimize Returns: Matrix of phenotype predictions [N,P] """ verbose = dlimix.getVerbose(verbose) # split samples into training and test sp.random.seed(seed) r = sp.random.permutation(self.Y.shape[0]) nfolds = 10 Icv = sp.floor(((sp.ones((self.Y.shape[0]))*nfolds)*r)/self.Y.shape[0]) RV = {} if self.P==1: RV['var'] = sp.zeros((nfolds,self.n_randEffs)) else: RV['var'] = sp.zeros((nfolds,self.P,self.n_randEffs)) Ystar = sp.zeros_like(self.Y) for fold_j in range(n_folds): if verbose: print((".. predict fold %d"%fold_j)) Itrain = Icv!=fold_j Itest = Icv==fold_j Ytrain = self.Y[Itrain,:] Ytest = self.Y[Itest,:] vc = VarianceDecomposition(Ytrain) vc.setTestSampleSize(Itest.sum()) for term_i in range(self.n_fixedEffs): F = self.vd.getFixed(term_i) Ftest = F[Itest,:] Ftrain = F[Itrain,:] if self.P>1: A = self.vd.getDesign(term_i) else: A = None vc.addFixedEffect(F=Ftrain,Ftest=Ftest,A=A) for term_i in range(self.n_randEffs): if self.P>1: tct = self.trait_covar_type[term_i] rank = self.rank[term_i] ftc = self.fixed_tc[term_i] jitt = self.jitter[term_i] if tct=='lowrank_diag1' or tct=='freeform1': d = D[fold_j,:,term_i] else: d = None else: tct = None rank = None ftc = None jitt = None d = None if term_i==self.noisPos: vc.addRandomEffect(is_noise=True,trait_covar_type=tct,rank=rank,jitter=jitt,fixed_trait_covar=ftc,d=d) else: R = self.vd.getTerm(term_i).getK() Rtrain = R[Itrain,:][:,Itrain] Rcross = R[Itrain,:][:,Itest] vc.addRandomEffect(K=Rtrain,Kcross=Rcross,trait_covar_type=tct,rank=rank,jitter=jitt,fixed_trait_covar=ftc,d=d) conv = vc.optimize(verbose=False,**keywords) if self.P==1: RV['var'][fold_j,:] = vc.getVarianceComps()[0,:] else: RV['var'][fold_j,:,:] = vc.getVarianceComps() if fullVector: assert conv, 'VarianceDecompositon:: not converged for fold %d. Stopped here' % fold_j if conv: Ystar[Itest,:] = vc.predictPhenos() else: warnings.warn('not converged for fold %d' % fold_j) Ystar[Itest,:] = sp.nan return Ystar,RV
[ "def", "crossValidation", "(", "self", ",", "seed", "=", "0", ",", "n_folds", "=", "10", ",", "fullVector", "=", "True", ",", "verbose", "=", "None", ",", "D", "=", "None", ",", "*", "*", "keywords", ")", ":", "verbose", "=", "dlimix", ".", "getVerbose", "(", "verbose", ")", "# split samples into training and test", "sp", ".", "random", ".", "seed", "(", "seed", ")", "r", "=", "sp", ".", "random", ".", "permutation", "(", "self", ".", "Y", ".", "shape", "[", "0", "]", ")", "nfolds", "=", "10", "Icv", "=", "sp", ".", "floor", "(", "(", "(", "sp", ".", "ones", "(", "(", "self", ".", "Y", ".", "shape", "[", "0", "]", ")", ")", "*", "nfolds", ")", "*", "r", ")", "/", "self", ".", "Y", ".", "shape", "[", "0", "]", ")", "RV", "=", "{", "}", "if", "self", ".", "P", "==", "1", ":", "RV", "[", "'var'", "]", "=", "sp", ".", "zeros", "(", "(", "nfolds", ",", "self", ".", "n_randEffs", ")", ")", "else", ":", "RV", "[", "'var'", "]", "=", "sp", ".", "zeros", "(", "(", "nfolds", ",", "self", ".", "P", ",", "self", ".", "n_randEffs", ")", ")", "Ystar", "=", "sp", ".", "zeros_like", "(", "self", ".", "Y", ")", "for", "fold_j", "in", "range", "(", "n_folds", ")", ":", "if", "verbose", ":", "print", "(", "(", "\".. predict fold %d\"", "%", "fold_j", ")", ")", "Itrain", "=", "Icv", "!=", "fold_j", "Itest", "=", "Icv", "==", "fold_j", "Ytrain", "=", "self", ".", "Y", "[", "Itrain", ",", ":", "]", "Ytest", "=", "self", ".", "Y", "[", "Itest", ",", ":", "]", "vc", "=", "VarianceDecomposition", "(", "Ytrain", ")", "vc", ".", "setTestSampleSize", "(", "Itest", ".", "sum", "(", ")", ")", "for", "term_i", "in", "range", "(", "self", ".", "n_fixedEffs", ")", ":", "F", "=", "self", ".", "vd", ".", "getFixed", "(", "term_i", ")", "Ftest", "=", "F", "[", "Itest", ",", ":", "]", "Ftrain", "=", "F", "[", "Itrain", ",", ":", "]", "if", "self", ".", "P", ">", "1", ":", "A", "=", "self", ".", "vd", ".", "getDesign", "(", "term_i", ")", "else", ":", "A", "=", "None", "vc", ".", "addFixedEffect", "(", "F", "=", "Ftrain", ",", "Ftest", "=", "Ftest", ",", "A", "=", "A", ")", "for", "term_i", "in", "range", "(", "self", ".", "n_randEffs", ")", ":", "if", "self", ".", "P", ">", "1", ":", "tct", "=", "self", ".", "trait_covar_type", "[", "term_i", "]", "rank", "=", "self", ".", "rank", "[", "term_i", "]", "ftc", "=", "self", ".", "fixed_tc", "[", "term_i", "]", "jitt", "=", "self", ".", "jitter", "[", "term_i", "]", "if", "tct", "==", "'lowrank_diag1'", "or", "tct", "==", "'freeform1'", ":", "d", "=", "D", "[", "fold_j", ",", ":", ",", "term_i", "]", "else", ":", "d", "=", "None", "else", ":", "tct", "=", "None", "rank", "=", "None", "ftc", "=", "None", "jitt", "=", "None", "d", "=", "None", "if", "term_i", "==", "self", ".", "noisPos", ":", "vc", ".", "addRandomEffect", "(", "is_noise", "=", "True", ",", "trait_covar_type", "=", "tct", ",", "rank", "=", "rank", ",", "jitter", "=", "jitt", ",", "fixed_trait_covar", "=", "ftc", ",", "d", "=", "d", ")", "else", ":", "R", "=", "self", ".", "vd", ".", "getTerm", "(", "term_i", ")", ".", "getK", "(", ")", "Rtrain", "=", "R", "[", "Itrain", ",", ":", "]", "[", ":", ",", "Itrain", "]", "Rcross", "=", "R", "[", "Itrain", ",", ":", "]", "[", ":", ",", "Itest", "]", "vc", ".", "addRandomEffect", "(", "K", "=", "Rtrain", ",", "Kcross", "=", "Rcross", ",", "trait_covar_type", "=", "tct", ",", "rank", "=", "rank", ",", "jitter", "=", "jitt", ",", "fixed_trait_covar", "=", "ftc", ",", "d", "=", "d", ")", "conv", "=", "vc", ".", "optimize", "(", "verbose", "=", "False", ",", "*", "*", 
"keywords", ")", "if", "self", ".", "P", "==", "1", ":", "RV", "[", "'var'", "]", "[", "fold_j", ",", ":", "]", "=", "vc", ".", "getVarianceComps", "(", ")", "[", "0", ",", ":", "]", "else", ":", "RV", "[", "'var'", "]", "[", "fold_j", ",", ":", ",", ":", "]", "=", "vc", ".", "getVarianceComps", "(", ")", "if", "fullVector", ":", "assert", "conv", ",", "'VarianceDecompositon:: not converged for fold %d. Stopped here'", "%", "fold_j", "if", "conv", ":", "Ystar", "[", "Itest", ",", ":", "]", "=", "vc", ".", "predictPhenos", "(", ")", "else", ":", "warnings", ".", "warn", "(", "'not converged for fold %d'", "%", "fold_j", ")", "Ystar", "[", "Itest", ",", ":", "]", "=", "sp", ".", "nan", "return", "Ystar", ",", "RV" ]
Split the dataset in n folds, predict each fold after training the model on all the others

Args:
    seed:        seed
    n_folds:     number of folds to train the model on
    fullVector:  Boolean indicator, if true it stops if no convergence is observed for one of the folds, otherwise goes through and returns a pheno matrix with missing values
    verbose:     if true, prints the fold that is being used for predictions
    **keywords:  params to pass to the function optimize

Returns:
    Matrix of phenotype predictions [N,P]
[ "Split", "the", "dataset", "in", "n", "folds", "predict", "each", "fold", "after", "training", "the", "model", "on", "all", "the", "others" ]
python
train
42.108434
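A NumPy sketch of just the fold-assignment step: every sample receives a fold id in [0, n_folds) derived from a seeded random permutation, which is what the Icv vector above encodes (note the record itself fixes nfolds = 10 when computing Icv, independent of the n_folds argument).

import numpy as np

def assign_folds(n_samples, n_folds=10, seed=0):
    rng = np.random.RandomState(seed)
    r = rng.permutation(n_samples)
    return np.floor(n_folds * r / float(n_samples)).astype(int)

icv = assign_folds(12, n_folds=3, seed=0)
print(icv)  # one fold id per sample
for fold in range(3):
    train_mask, test_mask = icv != fold, icv == fold
    print(fold, int(test_mask.sum()))  # each fold holds n_samples // n_folds samples here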
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/atomic_reference_compare_and_set_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/atomic_reference_compare_and_set_codec.py#L23-L36
def encode_request(name, expected, updated): """ Encode request into client_message""" client_message = ClientMessage(payload_size=calculate_size(name, expected, updated)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_str(name) client_message.append_bool(expected is None) if expected is not None: client_message.append_data(expected) client_message.append_bool(updated is None) if updated is not None: client_message.append_data(updated) client_message.update_frame_length() return client_message
[ "def", "encode_request", "(", "name", ",", "expected", ",", "updated", ")", ":", "client_message", "=", "ClientMessage", "(", "payload_size", "=", "calculate_size", "(", "name", ",", "expected", ",", "updated", ")", ")", "client_message", ".", "set_message_type", "(", "REQUEST_TYPE", ")", "client_message", ".", "set_retryable", "(", "RETRYABLE", ")", "client_message", ".", "append_str", "(", "name", ")", "client_message", ".", "append_bool", "(", "expected", "is", "None", ")", "if", "expected", "is", "not", "None", ":", "client_message", ".", "append_data", "(", "expected", ")", "client_message", ".", "append_bool", "(", "updated", "is", "None", ")", "if", "updated", "is", "not", "None", ":", "client_message", ".", "append_data", "(", "updated", ")", "client_message", ".", "update_frame_length", "(", ")", "return", "client_message" ]
Encode request into client_message
[ "Encode", "request", "into", "client_message" ]
python
train
43.285714
DAI-Lab/Copulas
copulas/multivariate/tree.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/tree.py#L94-L109
def _check_contraint(self, edge1, edge2): """Check if two edges satisfy vine constraint. Args: :param edge1: edge object representing edge1 :param edge2: edge object representing edge2 :type edge1: Edge object :type edge2: Edge object Returns: Boolean True if the two edges satisfy vine constraints """ full_node = set([edge1.L, edge1.R, edge2.L, edge2.R]) full_node.update(edge1.D) full_node.update(edge2.D) return len(full_node) == (self.level + 1)
[ "def", "_check_contraint", "(", "self", ",", "edge1", ",", "edge2", ")", ":", "full_node", "=", "set", "(", "[", "edge1", ".", "L", ",", "edge1", ".", "R", ",", "edge2", ".", "L", ",", "edge2", ".", "R", "]", ")", "full_node", ".", "update", "(", "edge1", ".", "D", ")", "full_node", ".", "update", "(", "edge2", ".", "D", ")", "return", "len", "(", "full_node", ")", "==", "(", "self", ".", "level", "+", "1", ")" ]
Check if two edges satisfy vine constraint. Args: :param edge1: edge object representing edge1 :param edge2: edge object representing edge2 :type edge1: Edge object :type edge2: Edge object Returns: Boolean True if the two edges satisfy vine constraints
[ "Check", "if", "two", "edges", "satisfy", "vine", "constraint", "." ]
python
train
35.0625
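A tiny standalone illustration of the proximity condition being tested: representing each edge as (L, R, D), the union of both edges' endpoints and conditioning sets must contain exactly level + 1 distinct nodes. The concrete edges below are made up.

def satisfies_vine_constraint(edge1, edge2, level):
    # each edge is a tuple (L, R, D) where D is the set of conditioned-on nodes
    full_node = {edge1[0], edge1[1], edge2[0], edge2[1]}
    full_node.update(edge1[2])
    full_node.update(edge2[2])
    return len(full_node) == level + 1

# Two first-tree edges sharing node 1 can be joined when building tree level 2:
print(satisfies_vine_constraint((0, 1, set()), (1, 2, set()), level=2))  # True
print(satisfies_vine_constraint((0, 1, set()), (2, 3, set()), level=2))  # False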
saltstack/salt
salt/modules/xfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xfs.py#L275-L297
def _xfs_prune_output(out, uuid): ''' Parse prune output. ''' data = {} cnt = [] cutpoint = False for line in [l.strip() for l in out.split("\n") if l]: if line.startswith("-"): if cutpoint: break else: cutpoint = True continue if cutpoint: cnt.append(line) for kset in [e for e in cnt[1:] if ':' in e]: key, val = [t.strip() for t in kset.split(":", 1)] data[key.lower().replace(" ", "_")] = val return data.get('uuid') == uuid and data or {}
[ "def", "_xfs_prune_output", "(", "out", ",", "uuid", ")", ":", "data", "=", "{", "}", "cnt", "=", "[", "]", "cutpoint", "=", "False", "for", "line", "in", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "out", ".", "split", "(", "\"\\n\"", ")", "if", "l", "]", ":", "if", "line", ".", "startswith", "(", "\"-\"", ")", ":", "if", "cutpoint", ":", "break", "else", ":", "cutpoint", "=", "True", "continue", "if", "cutpoint", ":", "cnt", ".", "append", "(", "line", ")", "for", "kset", "in", "[", "e", "for", "e", "in", "cnt", "[", "1", ":", "]", "if", "':'", "in", "e", "]", ":", "key", ",", "val", "=", "[", "t", ".", "strip", "(", ")", "for", "t", "in", "kset", ".", "split", "(", "\":\"", ",", "1", ")", "]", "data", "[", "key", ".", "lower", "(", ")", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "]", "=", "val", "return", "data", ".", "get", "(", "'uuid'", ")", "==", "uuid", "and", "data", "or", "{", "}" ]
Parse prune output.
[ "Parse", "prune", "output", "." ]
python
train
25.043478
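A standalone sketch of the same parsing idea: keep the block between the first pair of dashed separator lines, drop its first (label) line, split the remaining "key: value" pairs, normalise the keys, and return the result only when its UUID matches. The sample output string is made up.

def parse_prune_output(out, uuid):
    data, lines, past_separator = {}, [], False
    for line in (l.strip() for l in out.split('\n') if l):
        if line.startswith('-'):
            if past_separator:
                break  # second separator ends the block of interest
            past_separator = True
            continue
        if past_separator:
            lines.append(line)
    for pair in (l for l in lines[1:] if ':' in l):  # skip the leading label line
        key, val = (t.strip() for t in pair.split(':', 1))
        data[key.lower().replace(' ', '_')] = val
    return data if data.get('uuid') == uuid else {}

sample = "header\n----------\nlabel line\nUUID : abc-123\nBlock Size : 4096\n----------\n"
print(parse_prune_output(sample, 'abc-123'))
# {'uuid': 'abc-123', 'block_size': '4096'}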
cvxopt/chompack
src/python/conversion.py
https://github.com/cvxopt/chompack/blob/e07106b58b8055c34f6201e8c954482f86987833/src/python/conversion.py#L6-L75
def symb_to_block(symb, coupling = 'full'): """ Maps a symbolic factorization to a block-diagonal structure with coupling constraints. :param symb: :py:class:`symbolic` :param coupling: optional :return dims: list of block dimensions :return sparse_to_block: dictionary :return constraints: list of coupling constraints """ n = len(symb.snode) # order of block Ncliques = len(symb.snpar) # number of cliques # compute clique orders dims = [symb.sncolptr[j+1]-symb.sncolptr[j] for j in range(Ncliques)] # compute offsets in block-diagonal structure offsets = [0] for i in range(Ncliques): offsets.append(offsets[-1] + dims[i]**2) constraints = [] # list of coupling constraints sparse_to_block = {} # conversion dictionary for k in range(Ncliques): # map nonzeros in {Jk,Nk} part of clique k to block-diagonal structure nodes = symb.snode[symb.snptr[k]:symb.snptr[k+1]] rows = symb.snrowidx[symb.sncolptr[k]:symb.sncolptr[k+1]] nk = len(nodes) # number of nodes in supernode wk = len(rows) # number of nodes in clique for j in range(nk): for i in range(j,wk): if i == j: sparse_to_block[nodes[j]*n + rows[i]] = (offsets[k] + j*wk + i,) else: sparse_to_block[nodes[j]*n + rows[i]] =(offsets[k] + j*wk + i, offsets[k] + i*wk + j) # add coupling constraints to list of constraints if symb.snpar[k] == k: continue # skip if supernode k is a root supernode p = symb.snpar[k] np = len(symb.snode[symb.snptr[p]:symb.snptr[p+1]]) wp = symb.sncolptr[p+1] - symb.sncolptr[p] ri = symb.relidx[symb.relptr[k]:symb.relptr[k+1]] if type(coupling) is spmatrix: tmp = coupling[rows[nk:],rows[nk:]] for i,j in zip(tmp.I,tmp.J): if j == i: constraints.append((offsets[k] + (j+nk)*wk + i+nk, offsets[p] + ri[j]*wp + ri[i])) else: constraints.append((offsets[k] + (j+nk)*wk + i+nk, offsets[p] + ri[j]*wp + ri[i], offsets[k] + (i+nk)*wk + j+nk, offsets[p] + ri[i]*wp + ri[j])) elif coupling == 'full': for j in range(len(ri)): for i in range(j,len(ri)): if j == i: constraints.append((offsets[k] + (j+nk)*wk + i+nk, offsets[p] + ri[j]*wp + ri[i])) else: constraints.append((offsets[k] + (j+nk)*wk + i+nk, offsets[p] + ri[j]*wp + ri[i], offsets[k] + (i+nk)*wk + j+nk, offsets[p] + ri[i]*wp + ri[j])) return dims, sparse_to_block, constraints
[ "def", "symb_to_block", "(", "symb", ",", "coupling", "=", "'full'", ")", ":", "n", "=", "len", "(", "symb", ".", "snode", ")", "# order of block", "Ncliques", "=", "len", "(", "symb", ".", "snpar", ")", "# number of cliques", "# compute clique orders", "dims", "=", "[", "symb", ".", "sncolptr", "[", "j", "+", "1", "]", "-", "symb", ".", "sncolptr", "[", "j", "]", "for", "j", "in", "range", "(", "Ncliques", ")", "]", "# compute offsets in block-diagonal structure", "offsets", "=", "[", "0", "]", "for", "i", "in", "range", "(", "Ncliques", ")", ":", "offsets", ".", "append", "(", "offsets", "[", "-", "1", "]", "+", "dims", "[", "i", "]", "**", "2", ")", "constraints", "=", "[", "]", "# list of coupling constraints", "sparse_to_block", "=", "{", "}", "# conversion dictionary", "for", "k", "in", "range", "(", "Ncliques", ")", ":", "# map nonzeros in {Jk,Nk} part of clique k to block-diagonal structure", "nodes", "=", "symb", ".", "snode", "[", "symb", ".", "snptr", "[", "k", "]", ":", "symb", ".", "snptr", "[", "k", "+", "1", "]", "]", "rows", "=", "symb", ".", "snrowidx", "[", "symb", ".", "sncolptr", "[", "k", "]", ":", "symb", ".", "sncolptr", "[", "k", "+", "1", "]", "]", "nk", "=", "len", "(", "nodes", ")", "# number of nodes in supernode", "wk", "=", "len", "(", "rows", ")", "# number of nodes in clique", "for", "j", "in", "range", "(", "nk", ")", ":", "for", "i", "in", "range", "(", "j", ",", "wk", ")", ":", "if", "i", "==", "j", ":", "sparse_to_block", "[", "nodes", "[", "j", "]", "*", "n", "+", "rows", "[", "i", "]", "]", "=", "(", "offsets", "[", "k", "]", "+", "j", "*", "wk", "+", "i", ",", ")", "else", ":", "sparse_to_block", "[", "nodes", "[", "j", "]", "*", "n", "+", "rows", "[", "i", "]", "]", "=", "(", "offsets", "[", "k", "]", "+", "j", "*", "wk", "+", "i", ",", "offsets", "[", "k", "]", "+", "i", "*", "wk", "+", "j", ")", "# add coupling constraints to list of constraints", "if", "symb", ".", "snpar", "[", "k", "]", "==", "k", ":", "continue", "# skip if supernode k is a root supernode", "p", "=", "symb", ".", "snpar", "[", "k", "]", "np", "=", "len", "(", "symb", ".", "snode", "[", "symb", ".", "snptr", "[", "p", "]", ":", "symb", ".", "snptr", "[", "p", "+", "1", "]", "]", ")", "wp", "=", "symb", ".", "sncolptr", "[", "p", "+", "1", "]", "-", "symb", ".", "sncolptr", "[", "p", "]", "ri", "=", "symb", ".", "relidx", "[", "symb", ".", "relptr", "[", "k", "]", ":", "symb", ".", "relptr", "[", "k", "+", "1", "]", "]", "if", "type", "(", "coupling", ")", "is", "spmatrix", ":", "tmp", "=", "coupling", "[", "rows", "[", "nk", ":", "]", ",", "rows", "[", "nk", ":", "]", "]", "for", "i", ",", "j", "in", "zip", "(", "tmp", ".", "I", ",", "tmp", ".", "J", ")", ":", "if", "j", "==", "i", ":", "constraints", ".", "append", "(", "(", "offsets", "[", "k", "]", "+", "(", "j", "+", "nk", ")", "*", "wk", "+", "i", "+", "nk", ",", "offsets", "[", "p", "]", "+", "ri", "[", "j", "]", "*", "wp", "+", "ri", "[", "i", "]", ")", ")", "else", ":", "constraints", ".", "append", "(", "(", "offsets", "[", "k", "]", "+", "(", "j", "+", "nk", ")", "*", "wk", "+", "i", "+", "nk", ",", "offsets", "[", "p", "]", "+", "ri", "[", "j", "]", "*", "wp", "+", "ri", "[", "i", "]", ",", "offsets", "[", "k", "]", "+", "(", "i", "+", "nk", ")", "*", "wk", "+", "j", "+", "nk", ",", "offsets", "[", "p", "]", "+", "ri", "[", "i", "]", "*", "wp", "+", "ri", "[", "j", "]", ")", ")", "elif", "coupling", "==", "'full'", ":", "for", "j", "in", "range", "(", "len", "(", "ri", ")", ")", ":", "for", "i", "in", "range", "(", 
"j", ",", "len", "(", "ri", ")", ")", ":", "if", "j", "==", "i", ":", "constraints", ".", "append", "(", "(", "offsets", "[", "k", "]", "+", "(", "j", "+", "nk", ")", "*", "wk", "+", "i", "+", "nk", ",", "offsets", "[", "p", "]", "+", "ri", "[", "j", "]", "*", "wp", "+", "ri", "[", "i", "]", ")", ")", "else", ":", "constraints", ".", "append", "(", "(", "offsets", "[", "k", "]", "+", "(", "j", "+", "nk", ")", "*", "wk", "+", "i", "+", "nk", ",", "offsets", "[", "p", "]", "+", "ri", "[", "j", "]", "*", "wp", "+", "ri", "[", "i", "]", ",", "offsets", "[", "k", "]", "+", "(", "i", "+", "nk", ")", "*", "wk", "+", "j", "+", "nk", ",", "offsets", "[", "p", "]", "+", "ri", "[", "i", "]", "*", "wp", "+", "ri", "[", "j", "]", ")", ")", "return", "dims", ",", "sparse_to_block", ",", "constraints" ]
Maps a symbolic factorization to a block-diagonal structure with coupling constraints. :param symb: :py:class:`symbolic` :param coupling: optional :return dims: list of block dimensions :return sparse_to_block: dictionary :return constraints: list of coupling constraints
[ "Maps", "a", "symbolic", "factorization", "to", "a", "block", "-", "diagonal", "structure", "with", "coupling", "constraints", "." ]
python
train
44.285714
jazzband/django-mongonaut
mongonaut/sites.py
https://github.com/jazzband/django-mongonaut/blob/5485b2e029dff8ae267a4cb39c92d0a72cb5b144/mongonaut/sites.py#L49-L51
def has_add_permission(self, request): """ Can add this object """ return request.user.is_authenticated and request.user.is_active and request.user.is_staff
[ "def", "has_add_permission", "(", "self", ",", "request", ")", ":", "return", "request", ".", "user", ".", "is_authenticated", "and", "request", ".", "user", ".", "is_active", "and", "request", ".", "user", ".", "is_staff" ]
Can add this object
[ "Can", "add", "this", "object" ]
python
valid
56.666667
fishtown-analytics/dbt
core/dbt/adapters/cache.py
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/adapters/cache.py#L344-L358
def drop(self, relation): """Drop the named relation and cascade it appropriately to all dependent relations. Because dbt proactively does many `drop relation if exist ... cascade` that are noops, nonexistent relation drops cause a debug log and no other actions. :param str schema: The schema of the relation to drop. :param str identifier: The identifier of the relation to drop. """ dropped = _make_key(relation) logger.debug('Dropping relation: {!s}'.format(dropped)) with self.lock: self._drop_cascade_relation(dropped)
[ "def", "drop", "(", "self", ",", "relation", ")", ":", "dropped", "=", "_make_key", "(", "relation", ")", "logger", ".", "debug", "(", "'Dropping relation: {!s}'", ".", "format", "(", "dropped", ")", ")", "with", "self", ".", "lock", ":", "self", ".", "_drop_cascade_relation", "(", "dropped", ")" ]
Drop the named relation and cascade it appropriately to all dependent relations. Because dbt proactively does many `drop relation if exist ... cascade` that are noops, nonexistent relation drops cause a debug log and no other actions. :param str schema: The schema of the relation to drop. :param str identifier: The identifier of the relation to drop.
[ "Drop", "the", "named", "relation", "and", "cascade", "it", "appropriately", "to", "all", "dependent", "relations", "." ]
python
train
40.8
gwww/elkm1
elkm1_lib/message.py
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/message.py#L57-L61
def _as_decode(self, msg): """AS: Arming status report.""" return {'armed_statuses': [x for x in msg[4:12]], 'arm_up_states': [x for x in msg[12:20]], 'alarm_states': [x for x in msg[20:28]]}
[ "def", "_as_decode", "(", "self", ",", "msg", ")", ":", "return", "{", "'armed_statuses'", ":", "[", "x", "for", "x", "in", "msg", "[", "4", ":", "12", "]", "]", ",", "'arm_up_states'", ":", "[", "x", "for", "x", "in", "msg", "[", "12", ":", "20", "]", "]", ",", "'alarm_states'", ":", "[", "x", "for", "x", "in", "msg", "[", "20", ":", "28", "]", "]", "}" ]
AS: Arming status report.
[ "AS", ":", "Arming", "status", "report", "." ]
python
train
47
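A minimal illustration of the slicing above on a synthetic payload; the prefix and status codes here are made up, since real ElkM1 "AS" messages carry specific status codes and a checksum not modeled in this sketch.
msg = "1EAS" + "10000000" + "00000000" + "00000000" + "0030"  # synthetic, not a real panel message
armed_statuses = [x for x in msg[4:12]]   # ['1', '0', '0', '0', '0', '0', '0', '0'] - one entry per area
arm_up_states = [x for x in msg[12:20]]   # eight arm-up state characters
alarm_states = [x for x in msg[20:28]]    # eight alarm state characters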
kblin/ncbi-genome-download
ncbi_genome_download/core.py
https://github.com/kblin/ncbi-genome-download/blob/dc55382d351c29e1027be8fa3876701762c1d752/ncbi_genome_download/core.py#L405-L410
def grab_checksums_file(entry): """Grab the checksum file for a given entry.""" http_url = convert_ftp_url(entry['ftp_path']) full_url = '{}/md5checksums.txt'.format(http_url) req = requests.get(full_url) return req.text
[ "def", "grab_checksums_file", "(", "entry", ")", ":", "http_url", "=", "convert_ftp_url", "(", "entry", "[", "'ftp_path'", "]", ")", "full_url", "=", "'{}/md5checksums.txt'", ".", "format", "(", "http_url", ")", "req", "=", "requests", ".", "get", "(", "full_url", ")", "return", "req", ".", "text" ]
Grab the checksum file for a given entry.
[ "Grab", "the", "checksum", "file", "for", "a", "given", "entry", "." ]
python
train
39.166667
Workiva/furious
furious/extras/xsrf.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/xsrf.py#L81-L139
def verify_token_string(self, token_string, action=None, timeout=None, current_time=None): """Generate a hash of the given token contents that can be verified. :param token_string: A string containing the hashed token (generated by `generate_token_string`). :param action: A string containing the action that is being verified. :param timeout: An int or float representing the number of seconds that the token is valid for. If None then tokens are valid forever. :current_time: An int representing the number of seconds since the epoch. Will be used by to check for token expiry if `timeout` is set. If `None` then the current time will be used. :raises: XSRFTokenMalformed if the given token_string cannot be parsed. XSRFTokenExpiredException if the given token string is expired. XSRFTokenInvalid if the given token string does not match the contents of the `XSRFToken`. """ try: decoded_token_string = base64.urlsafe_b64decode(token_string) except TypeError: raise XSRFTokenMalformed() split_token = decoded_token_string.split(self._DELIMITER) if len(split_token) != 2: raise XSRFTokenMalformed() try: token_time = int(split_token[1]) except ValueError: raise XSRFTokenMalformed() if timeout is not None: if current_time is None: current_time = time.time() # If an attacker modifies the plain text time then it will not match # the hashed time so this check is sufficient. if (token_time + timeout) < current_time: raise XSRFTokenExpiredException() expected_token = XSRFToken(self.user_id, self.secret, token_time) expected_token_string = expected_token.generate_token_string(action) if len(expected_token_string) != len(token_string): raise XSRFTokenInvalid() # Compare the two strings in constant time to prevent timing attacks. different = 0 for a, b in zip(token_string, expected_token_string): different |= ord(a) ^ ord(b) if different: raise XSRFTokenInvalid()
[ "def", "verify_token_string", "(", "self", ",", "token_string", ",", "action", "=", "None", ",", "timeout", "=", "None", ",", "current_time", "=", "None", ")", ":", "try", ":", "decoded_token_string", "=", "base64", ".", "urlsafe_b64decode", "(", "token_string", ")", "except", "TypeError", ":", "raise", "XSRFTokenMalformed", "(", ")", "split_token", "=", "decoded_token_string", ".", "split", "(", "self", ".", "_DELIMITER", ")", "if", "len", "(", "split_token", ")", "!=", "2", ":", "raise", "XSRFTokenMalformed", "(", ")", "try", ":", "token_time", "=", "int", "(", "split_token", "[", "1", "]", ")", "except", "ValueError", ":", "raise", "XSRFTokenMalformed", "(", ")", "if", "timeout", "is", "not", "None", ":", "if", "current_time", "is", "None", ":", "current_time", "=", "time", ".", "time", "(", ")", "# If an attacker modifies the plain text time then it will not match", "# the hashed time so this check is sufficient.", "if", "(", "token_time", "+", "timeout", ")", "<", "current_time", ":", "raise", "XSRFTokenExpiredException", "(", ")", "expected_token", "=", "XSRFToken", "(", "self", ".", "user_id", ",", "self", ".", "secret", ",", "token_time", ")", "expected_token_string", "=", "expected_token", ".", "generate_token_string", "(", "action", ")", "if", "len", "(", "expected_token_string", ")", "!=", "len", "(", "token_string", ")", ":", "raise", "XSRFTokenInvalid", "(", ")", "# Compare the two strings in constant time to prevent timing attacks.", "different", "=", "0", "for", "a", ",", "b", "in", "zip", "(", "token_string", ",", "expected_token_string", ")", ":", "different", "|=", "ord", "(", "a", ")", "^", "ord", "(", "b", ")", "if", "different", ":", "raise", "XSRFTokenInvalid", "(", ")" ]
Generate a hash of the given token contents that can be verified. :param token_string: A string containing the hashed token (generated by `generate_token_string`). :param action: A string containing the action that is being verified. :param timeout: An int or float representing the number of seconds that the token is valid for. If None then tokens are valid forever. :current_time: An int representing the number of seconds since the epoch. Will be used by to check for token expiry if `timeout` is set. If `None` then the current time will be used. :raises: XSRFTokenMalformed if the given token_string cannot be parsed. XSRFTokenExpiredException if the given token string is expired. XSRFTokenInvalid if the given token string does not match the contents of the `XSRFToken`.
[ "Generate", "a", "hash", "of", "the", "given", "token", "contents", "that", "can", "be", "verified", "." ]
python
train
40.457627
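A hedged usage sketch for the verification flow above; the import path follows this record's path field, and the XSRFToken constructor and generate_token_string signatures are inferred from how the method builds its expected token, so they are assumptions rather than documented API.
import time
from furious.extras.xsrf import XSRFToken  # assumed importable per the record's path

token = XSRFToken('user@example.com', 'server-side-secret')   # user_id, secret (inferred signature)
token_string = token.generate_token_string('delete-item')     # same action must be passed on verify

# Returns quietly while the token is fresh; raises XSRFTokenExpiredException once
# current_time moves past the timeout window, and XSRFTokenInvalid on a mismatch.
token.verify_token_string(token_string, action='delete-item', timeout=3600,
                          current_time=time.time())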
mikicz/arca
arca/_arca.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/_arca.py#L459-L472
def validate_reference(self, reference: ReferenceDefinitionType) -> Optional[Path]: """ Converts reference to :class:`Path <pathlib.Path>` :raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`. """ if reference is not None: if isinstance(reference, bytes): reference = reference.decode("utf-8") try: return Path(reference) except TypeError: raise ValueError(f"Can't convert reference path {reference} to a pathlib.Path") return None
[ "def", "validate_reference", "(", "self", ",", "reference", ":", "ReferenceDefinitionType", ")", "->", "Optional", "[", "Path", "]", ":", "if", "reference", "is", "not", "None", ":", "if", "isinstance", "(", "reference", ",", "bytes", ")", ":", "reference", "=", "reference", ".", "decode", "(", "\"utf-8\"", ")", "try", ":", "return", "Path", "(", "reference", ")", "except", "TypeError", ":", "raise", "ValueError", "(", "f\"Can't convert reference path {reference} to a pathlib.Path\"", ")", "return", "None" ]
Converts reference to :class:`Path <pathlib.Path>` :raise ValueError: If ``reference`` can't be converted to :class:`Path <pathlib.Path>`.
[ "Converts", "reference", "to", ":", "class", ":", "Path", "<pathlib", ".", "Path", ">" ]
python
train
41.285714
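A short sketch of the conversion rules above, assuming `arca` is an instantiated Arca object from this package; the call results follow directly from the method body.
arca.validate_reference(None)               # -> None
arca.validate_reference("feature/branch")   # -> Path('feature/branch')
arca.validate_reference(b"master")          # -> Path('master'), bytes are decoded first
arca.validate_reference(123)                # raises ValueError - an int is not a valid path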
timeyyy/apptools
peasoup/pidutil.py
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/pidutil.py#L103-L124
def listpid(toggle='basic'): # Add method to exclude elements from list '''list pids''' proc=psutil.process_iter()# evalute if its better to keep one instance of this or generate here? if toggle=='basic': host=gethostname() host2=os.getenv('HOME').split(sep='/' )[-1] for row in proc: #~ DPRINT([row.ppid(),row.name(),host],'username,row.name,host') if row.username() in host or row.username() in host2: #new psutil using grabing timeyyy and not alfa for username so host 2 is getting the timeyyy on UBUNTU yield row.name(), row.ppid() elif toggle=='all': for row in proc: yield row.name(), row.ppid() elif toggle =='windows-basic': for row in proc: try: pname = psutil.Process(row.pid).name() pname = pname[:-4]#removiing .exe from end yield pname, row.pid except: pass
[ "def", "listpid", "(", "toggle", "=", "'basic'", ")", ":", "# Add method to exclude elements from list", "proc", "=", "psutil", ".", "process_iter", "(", ")", "# evalute if its better to keep one instance of this or generate here?", "if", "toggle", "==", "'basic'", ":", "host", "=", "gethostname", "(", ")", "host2", "=", "os", ".", "getenv", "(", "'HOME'", ")", ".", "split", "(", "sep", "=", "'/'", ")", "[", "-", "1", "]", "for", "row", "in", "proc", ":", "#~ DPRINT([row.ppid(),row.name(),host],'username,row.name,host')", "if", "row", ".", "username", "(", ")", "in", "host", "or", "row", ".", "username", "(", ")", "in", "host2", ":", "#new psutil using grabing timeyyy and not alfa for username so host 2 is getting the timeyyy on UBUNTU ", "yield", "row", ".", "name", "(", ")", ",", "row", ".", "ppid", "(", ")", "elif", "toggle", "==", "'all'", ":", "for", "row", "in", "proc", ":", "yield", "row", ".", "name", "(", ")", ",", "row", ".", "ppid", "(", ")", "elif", "toggle", "==", "'windows-basic'", ":", "for", "row", "in", "proc", ":", "try", ":", "pname", "=", "psutil", ".", "Process", "(", "row", ".", "pid", ")", ".", "name", "(", ")", "pname", "=", "pname", "[", ":", "-", "4", "]", "#removiing .exe from end", "yield", "pname", ",", "row", ".", "pid", "except", ":", "pass" ]
list pids
[ "list", "pids" ]
python
train
44
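A hedged usage sketch; the import path is taken from this record's path field and psutil must be installed. Note that the generator yields (name, ppid) pairs except in the 'windows-basic' branch, which yields (name, pid).
from peasoup.pidutil import listpid  # assumed importable per the record's path

for name, ppid in listpid(toggle='all'):
    print(name, ppid)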
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/rst/urml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/rst/urml.py#L388-L400
def extract_segment_types(urml_document_element, namespace): """Return a map from segment node IDs to their segment type ('nucleus', 'satellite' or 'isolated'). """ segment_types = \ {namespace+':'+seg.attrib['id']: seg.tag for seg in urml_document_element.iter('nucleus', 'satellite')} for seg in urml_document_element.iter('segment'): seg_id = namespace+':'+seg.attrib['id'] if seg_id not in segment_types: segment_types[seg_id] = 'isolated' return segment_types
[ "def", "extract_segment_types", "(", "urml_document_element", ",", "namespace", ")", ":", "segment_types", "=", "{", "namespace", "+", "':'", "+", "seg", ".", "attrib", "[", "'id'", "]", ":", "seg", ".", "tag", "for", "seg", "in", "urml_document_element", ".", "iter", "(", "'nucleus'", ",", "'satellite'", ")", "}", "for", "seg", "in", "urml_document_element", ".", "iter", "(", "'segment'", ")", ":", "seg_id", "=", "namespace", "+", "':'", "+", "seg", ".", "attrib", "[", "'id'", "]", "if", "seg_id", "not", "in", "segment_types", ":", "segment_types", "[", "seg_id", "]", "=", "'isolated'", "return", "segment_types" ]
Return a map from segment node IDs to their segment type ('nucleus', 'satellite' or 'isolated').
[ "Return", "a", "map", "from", "segment", "node", "IDs", "to", "their", "segment", "type", "(", "nucleus", "satellite", "or", "isolated", ")", "." ]
python
train
40.153846
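A toy illustration with a flat, made-up document; real URML nests segments inside nucleus/satellite elements. It uses lxml, whose iter() accepts the multiple tag names passed in the function above.
from lxml import etree
from discoursegraphs.readwrite.rst.urml import extract_segment_types  # per the record's path

doc = etree.fromstring(
    '<document><nucleus id="n1"/><satellite id="s1"/><segment id="e1"/></document>')
extract_segment_types(doc, 'doc1')
# -> {'doc1:n1': 'nucleus', 'doc1:s1': 'satellite', 'doc1:e1': 'isolated'}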
keans/lmnotify
lmnotify/lmnotify.py
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L350-L369
def set_display(self, brightness=100, brightness_mode="auto"): """ allows to modify display state (change brightness) :param int brightness: display brightness [0, 100] (default: 100) :param str brightness_mode: the brightness mode of the display [auto, manual] (default: auto) """ assert(brightness_mode in ("auto", "manual")) assert(brightness in range(101)) log.debug("setting display information...") cmd, url = DEVICE_URLS["set_display"] json_data = { "brightness_mode": brightness_mode, "brightness": brightness } return self._exec(cmd, url, json_data=json_data)
[ "def", "set_display", "(", "self", ",", "brightness", "=", "100", ",", "brightness_mode", "=", "\"auto\"", ")", ":", "assert", "(", "brightness_mode", "in", "(", "\"auto\"", ",", "\"manual\"", ")", ")", "assert", "(", "brightness", "in", "range", "(", "101", ")", ")", "log", ".", "debug", "(", "\"setting display information...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"set_display\"", "]", "json_data", "=", "{", "\"brightness_mode\"", ":", "brightness_mode", ",", "\"brightness\"", ":", "brightness", "}", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ",", "json_data", "=", "json_data", ")" ]
allows to modify display state (change brightness) :param int brightness: display brightness [0, 100] (default: 100) :param str brightness_mode: the brightness mode of the display [auto, manual] (default: auto)
[ "allows", "to", "modify", "display", "state", "(", "change", "brightness", ")" ]
python
train
35.45
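A hedged sketch of the call itself; `lm` stands for an already-authenticated LaMetricManager with a target device selected, which is outside the scope of this record.
lm.set_display(brightness=60, brightness_mode="manual")  # dim to 60%, manual control
lm.set_display()                                         # defaults: full brightness, auto mode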
pybel/pybel
src/pybel/manager/lookup_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/lookup_manager.py#L17-L21
def get_dsl_by_hash(self, node_hash: str) -> Optional[BaseEntity]: """Look up a node by the hash and returns the corresponding PyBEL node tuple.""" node = self.get_node_by_hash(node_hash) if node is not None: return node.as_bel()
[ "def", "get_dsl_by_hash", "(", "self", ",", "node_hash", ":", "str", ")", "->", "Optional", "[", "BaseEntity", "]", ":", "node", "=", "self", ".", "get_node_by_hash", "(", "node_hash", ")", "if", "node", "is", "not", "None", ":", "return", "node", ".", "as_bel", "(", ")" ]
Look up a node by the hash and returns the corresponding PyBEL node tuple.
[ "Look", "up", "a", "node", "by", "the", "hash", "and", "returns", "the", "corresponding", "PyBEL", "node", "tuple", "." ]
python
train
52.2
mabuchilab/QNET
src/qnet/printing/asciiprinter.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/asciiprinter.py#L112-L118
def _render_hs_label(self, hs): """Return the label of the given Hilbert space as a string""" if isinstance(hs.__class__, Singleton): return self._render_str(hs.label) else: return self._tensor_sym.join( [self._render_str(ls.label) for ls in hs.local_factors])
[ "def", "_render_hs_label", "(", "self", ",", "hs", ")", ":", "if", "isinstance", "(", "hs", ".", "__class__", ",", "Singleton", ")", ":", "return", "self", ".", "_render_str", "(", "hs", ".", "label", ")", "else", ":", "return", "self", ".", "_tensor_sym", ".", "join", "(", "[", "self", ".", "_render_str", "(", "ls", ".", "label", ")", "for", "ls", "in", "hs", ".", "local_factors", "]", ")" ]
Return the label of the given Hilbert space as a string
[ "Return", "the", "label", "of", "the", "given", "Hilbert", "space", "as", "a", "string" ]
python
train
45.428571
asweigart/pyautogui
pyautogui/__init__.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L977-L993
def keyUp(key, pause=None, _pause=True): """Performs a keyboard key release (without the press down beforehand). Args: key (str): The key to be released up. The valid names are listed in KEYBOARD_KEYS. Returns: None """ if len(key) > 1: key = key.lower() _failSafeCheck() platformModule._keyUp(key) _autoPause(pause, _pause)
[ "def", "keyUp", "(", "key", ",", "pause", "=", "None", ",", "_pause", "=", "True", ")", ":", "if", "len", "(", "key", ")", ">", "1", ":", "key", "=", "key", ".", "lower", "(", ")", "_failSafeCheck", "(", ")", "platformModule", ".", "_keyUp", "(", "key", ")", "_autoPause", "(", "pause", ",", "_pause", ")" ]
Performs a keyboard key release (without the press down beforehand). Args: key (str): The key to be released up. The valid names are listed in KEYBOARD_KEYS. Returns: None
[ "Performs", "a", "keyboard", "key", "release", "(", "without", "the", "press", "down", "beforehand", ")", "." ]
python
train
21.764706
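keyUp() is normally paired with keyDown() to hold a modifier around another key press, for example:
import pyautogui

pyautogui.keyDown('shift')   # hold shift
pyautogui.press('left')      # extend the selection one character to the left
pyautogui.keyUp('shift')     # release shift (the call documented above)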
janpipek/physt
physt/io/protobuf/__init__.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/io/protobuf/__init__.py#L24-L76
def write(histogram): """Convert a histogram to a protobuf message. Note: Currently, all binnings are converted to static form. When you load the histogram again, you will lose any related behaviour. Note: A histogram collection is also planned. Parameters ---------- histogram : HistogramBase | list | dict Any histogram Returns ------- message : google.protobuf.message.Message A protocol buffer message """ histogram_dict = histogram.to_dict() message = Histogram() for field in SIMPLE_CONVERSION_FIELDS: setattr(message, field, histogram_dict[field]) # Main numerical data - TODO: Optimize! message.frequencies.extend(histogram.frequencies.flatten()) message.errors2.extend(histogram.errors2.flatten()) # Binnings for binning in histogram._binnings: binning_message = message.binnings.add() for edges in binning.bins: limits = binning_message.bins.add() limits.lower = edges[0] limits.upper = edges[1] # All meta data meta_message = message.meta # user_defined = {} # for key, value in histogram.meta_data.items(): # if key not in PREDEFINED: # user_defined[str(key)] = str(value) for key in SIMPLE_META_KEYS: if key in histogram.meta_data: setattr(meta_message, key, str(histogram.meta_data[key])) if "axis_names" in histogram.meta_data: meta_message.axis_names.extend(histogram.meta_data["axis_names"]) message.physt_version = CURRENT_VERSION message.physt_compatible = COMPATIBLE_VERSION return message
[ "def", "write", "(", "histogram", ")", ":", "histogram_dict", "=", "histogram", ".", "to_dict", "(", ")", "message", "=", "Histogram", "(", ")", "for", "field", "in", "SIMPLE_CONVERSION_FIELDS", ":", "setattr", "(", "message", ",", "field", ",", "histogram_dict", "[", "field", "]", ")", "# Main numerical data - TODO: Optimize!", "message", ".", "frequencies", ".", "extend", "(", "histogram", ".", "frequencies", ".", "flatten", "(", ")", ")", "message", ".", "errors2", ".", "extend", "(", "histogram", ".", "errors2", ".", "flatten", "(", ")", ")", "# Binnings", "for", "binning", "in", "histogram", ".", "_binnings", ":", "binning_message", "=", "message", ".", "binnings", ".", "add", "(", ")", "for", "edges", "in", "binning", ".", "bins", ":", "limits", "=", "binning_message", ".", "bins", ".", "add", "(", ")", "limits", ".", "lower", "=", "edges", "[", "0", "]", "limits", ".", "upper", "=", "edges", "[", "1", "]", "# All meta data", "meta_message", "=", "message", ".", "meta", "# user_defined = {}", "# for key, value in histogram.meta_data.items():", "# if key not in PREDEFINED:", "# user_defined[str(key)] = str(value)", "for", "key", "in", "SIMPLE_META_KEYS", ":", "if", "key", "in", "histogram", ".", "meta_data", ":", "setattr", "(", "meta_message", ",", "key", ",", "str", "(", "histogram", ".", "meta_data", "[", "key", "]", ")", ")", "if", "\"axis_names\"", "in", "histogram", ".", "meta_data", ":", "meta_message", ".", "axis_names", ".", "extend", "(", "histogram", ".", "meta_data", "[", "\"axis_names\"", "]", ")", "message", ".", "physt_version", "=", "CURRENT_VERSION", "message", ".", "physt_compatible", "=", "COMPATIBLE_VERSION", "return", "message" ]
Convert a histogram to a protobuf message. Note: Currently, all binnings are converted to static form. When you load the histogram again, you will lose any related behaviour. Note: A histogram collection is also planned. Parameters ---------- histogram : HistogramBase | list | dict Any histogram Returns ------- message : google.protobuf.message.Message A protocol buffer message
[ "Convert", "a", "histogram", "to", "a", "protobuf", "message", "." ]
python
train
30.54717
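A hedged sketch that builds a small one-dimensional physt histogram and serializes it with the function above; physt.h1 and numpy are assumed to be available in the environment.
import numpy as np
import physt
from physt.io.protobuf import write  # per the record's path

h = physt.h1(np.random.normal(size=1000), 20)    # 20-bin 1D histogram
message = write(h)
print(message.physt_version, len(message.frequencies))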
gholt/swiftly
swiftly/cli/optionparser.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/optionparser.py#L129-L135
def print_usage(self, file=None): """ Outputs usage information to the file if specified, or to the io_manager's stdout if available, or to sys.stdout. """ optparse.OptionParser.print_usage(self, file) file.flush()
[ "def", "print_usage", "(", "self", ",", "file", "=", "None", ")", ":", "optparse", ".", "OptionParser", ".", "print_usage", "(", "self", ",", "file", ")", "file", ".", "flush", "(", ")" ]
Outputs usage information to the file if specified, or to the io_manager's stdout if available, or to sys.stdout.
[ "Outputs", "usage", "information", "to", "the", "file", "if", "specified", "or", "to", "the", "io_manager", "s", "stdout", "if", "available", "or", "to", "sys", ".", "stdout", "." ]
python
test
36.571429
dnanexus/dx-toolkit
src/python/dxpy/workflow_builder.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/workflow_builder.py#L122-L137
def _set_categories_on_workflow(global_workflow_id, categories_to_set): """ Note: Categories are set on the workflow series level, i.e. the same set applies to all versions. """ assert(isinstance(categories_to_set, list)) existing_categories = dxpy.api.global_workflow_list_categories(global_workflow_id)['categories'] categories_to_add = set(categories_to_set).difference(set(existing_categories)) categories_to_remove = set(existing_categories).difference(set(categories_to_set)) if categories_to_add: dxpy.api.global_workflow_add_categories(global_workflow_id, input_params={'categories': list(categories_to_add)}) if categories_to_remove: dxpy.api.global_workflow_remove_categories(global_workflow_id, input_params={'categories': list(categories_to_remove)})
[ "def", "_set_categories_on_workflow", "(", "global_workflow_id", ",", "categories_to_set", ")", ":", "assert", "(", "isinstance", "(", "categories_to_set", ",", "list", ")", ")", "existing_categories", "=", "dxpy", ".", "api", ".", "global_workflow_list_categories", "(", "global_workflow_id", ")", "[", "'categories'", "]", "categories_to_add", "=", "set", "(", "categories_to_set", ")", ".", "difference", "(", "set", "(", "existing_categories", ")", ")", "categories_to_remove", "=", "set", "(", "existing_categories", ")", ".", "difference", "(", "set", "(", "categories_to_set", ")", ")", "if", "categories_to_add", ":", "dxpy", ".", "api", ".", "global_workflow_add_categories", "(", "global_workflow_id", ",", "input_params", "=", "{", "'categories'", ":", "list", "(", "categories_to_add", ")", "}", ")", "if", "categories_to_remove", ":", "dxpy", ".", "api", ".", "global_workflow_remove_categories", "(", "global_workflow_id", ",", "input_params", "=", "{", "'categories'", ":", "list", "(", "categories_to_remove", ")", "}", ")" ]
Note: Categories are set on the workflow series level, i.e. the same set applies to all versions.
[ "Note", ":", "Categories", "are", "set", "on", "the", "workflow", "series", "level", "i", ".", "e", ".", "the", "same", "set", "applies", "to", "all", "versions", "." ]
python
train
56.4375
ethereum/eth-abi
eth_abi/registry.py
https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L258-L267
def has_arrlist(type_str): """ A predicate that matches a type string with an array dimension list. """ try: abi_type = grammar.parse(type_str) except exceptions.ParseError: return False return abi_type.arrlist is not None
[ "def", "has_arrlist", "(", "type_str", ")", ":", "try", ":", "abi_type", "=", "grammar", ".", "parse", "(", "type_str", ")", "except", "exceptions", ".", "ParseError", ":", "return", "False", "return", "abi_type", ".", "arrlist", "is", "not", "None" ]
A predicate that matches a type string with an array dimension list.
[ "A", "predicate", "that", "matches", "a", "type", "string", "with", "an", "array", "dimension", "list", "." ]
python
train
25.4
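The predicate applied to a few ABI type strings; the import path follows this record's path field.
from eth_abi.registry import has_arrlist  # assumed importable per the record's path

has_arrlist('uint256[8]')   # -> True, fixed-size array dimension
has_arrlist('uint256')      # -> False, no array dimension
has_arrlist('not a type')   # -> False, the grammar fails to parse it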
martinpitt/python-dbusmock
dbusmock/templates/ofono.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/ofono.py#L169-L190
def add_voice_call_api(mock): '''Add org.ofono.VoiceCallManager API to a mock''' # also add an emergency number which is not a real one, in case one runs a # test case against a production ofono :-) mock.AddProperty('org.ofono.VoiceCallManager', 'EmergencyNumbers', ['911', '13373']) mock.calls = [] # object paths mock.AddMethods('org.ofono.VoiceCallManager', [ ('GetProperties', '', 'a{sv}', 'ret = self.GetAll("org.ofono.VoiceCallManager")'), ('Transfer', '', '', ''), ('SwapCalls', '', '', ''), ('ReleaseAndAnswer', '', '', ''), ('ReleaseAndSwap', '', '', ''), ('HoldAndAnswer', '', '', ''), ('SendTones', 's', '', ''), ('PrivateChat', 'o', 'ao', NOT_IMPLEMENTED), ('CreateMultiparty', '', 'o', NOT_IMPLEMENTED), ('HangupMultiparty', '', '', NOT_IMPLEMENTED), ('GetCalls', '', 'a(oa{sv})', 'ret = [(c, objects[c].GetAll("org.ofono.VoiceCall")) for c in self.calls]') ])
[ "def", "add_voice_call_api", "(", "mock", ")", ":", "# also add an emergency number which is not a real one, in case one runs a", "# test case against a production ofono :-)", "mock", ".", "AddProperty", "(", "'org.ofono.VoiceCallManager'", ",", "'EmergencyNumbers'", ",", "[", "'911'", ",", "'13373'", "]", ")", "mock", ".", "calls", "=", "[", "]", "# object paths", "mock", ".", "AddMethods", "(", "'org.ofono.VoiceCallManager'", ",", "[", "(", "'GetProperties'", ",", "''", ",", "'a{sv}'", ",", "'ret = self.GetAll(\"org.ofono.VoiceCallManager\")'", ")", ",", "(", "'Transfer'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'SwapCalls'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'ReleaseAndAnswer'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'ReleaseAndSwap'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'HoldAndAnswer'", ",", "''", ",", "''", ",", "''", ")", ",", "(", "'SendTones'", ",", "'s'", ",", "''", ",", "''", ")", ",", "(", "'PrivateChat'", ",", "'o'", ",", "'ao'", ",", "NOT_IMPLEMENTED", ")", ",", "(", "'CreateMultiparty'", ",", "''", ",", "'o'", ",", "NOT_IMPLEMENTED", ")", ",", "(", "'HangupMultiparty'", ",", "''", ",", "''", ",", "NOT_IMPLEMENTED", ")", ",", "(", "'GetCalls'", ",", "''", ",", "'a(oa{sv})'", ",", "'ret = [(c, objects[c].GetAll(\"org.ofono.VoiceCall\")) for c in self.calls]'", ")", "]", ")" ]
Add org.ofono.VoiceCallManager API to a mock
[ "Add", "org", ".", "ofono", ".", "VoiceCallManager", "API", "to", "a", "mock" ]
python
train
44.181818
visualfabriq/bquery
bquery/ctable.py
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/ctable.py#L95-L152
def cache_factor(self, col_list, refresh=False): """ Existing todos here are: these should be hidden helper carrays As in: not normal columns that you would normally see as a user The factor (label index) carray is as long as the original carray (and the rest of the table therefore) But the (unique) values carray is not as long (as long as the number of unique values) :param col_list: :param refresh: :return: """ if not self.rootdir: raise TypeError('Only out-of-core ctables can have ' 'factorization caching at the moment') if not isinstance(col_list, list): col_list = [col_list] if refresh: kill_list = [x for x in os.listdir(self.rootdir) if '.factor' in x or '.values' in x] for kill_dir in kill_list: rm_file_or_dir(os.path.join(self.rootdir, kill_dir)) for col in col_list: # create cache if needed if refresh or not self.cache_valid(col): # todo: also add locking mechanism here # create directories col_rootdir = self[col].rootdir col_factor_rootdir = col_rootdir + '.factor' col_factor_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-') col_values_rootdir = col_rootdir + '.values' col_values_rootdir_tmp = tempfile.mkdtemp(prefix='bcolz-') # create factor carray_factor = \ bcolz.carray([], dtype='int64', expectedlen=self.size, rootdir=col_factor_rootdir_tmp, mode='w') _, values = \ ctable_ext.factorize(self[col], labels=carray_factor) carray_factor.flush() rm_file_or_dir(col_factor_rootdir, ignore_errors=True) shutil.move(col_factor_rootdir_tmp, col_factor_rootdir) # create values carray_values = \ bcolz.carray(np.fromiter(values.values(), dtype=self[col].dtype), rootdir=col_values_rootdir_tmp, mode='w') carray_values.flush() rm_file_or_dir(col_values_rootdir, ignore_errors=True) shutil.move(col_values_rootdir_tmp, col_values_rootdir)
[ "def", "cache_factor", "(", "self", ",", "col_list", ",", "refresh", "=", "False", ")", ":", "if", "not", "self", ".", "rootdir", ":", "raise", "TypeError", "(", "'Only out-of-core ctables can have '", "'factorization caching at the moment'", ")", "if", "not", "isinstance", "(", "col_list", ",", "list", ")", ":", "col_list", "=", "[", "col_list", "]", "if", "refresh", ":", "kill_list", "=", "[", "x", "for", "x", "in", "os", ".", "listdir", "(", "self", ".", "rootdir", ")", "if", "'.factor'", "in", "x", "or", "'.values'", "in", "x", "]", "for", "kill_dir", "in", "kill_list", ":", "rm_file_or_dir", "(", "os", ".", "path", ".", "join", "(", "self", ".", "rootdir", ",", "kill_dir", ")", ")", "for", "col", "in", "col_list", ":", "# create cache if needed", "if", "refresh", "or", "not", "self", ".", "cache_valid", "(", "col", ")", ":", "# todo: also add locking mechanism here", "# create directories", "col_rootdir", "=", "self", "[", "col", "]", ".", "rootdir", "col_factor_rootdir", "=", "col_rootdir", "+", "'.factor'", "col_factor_rootdir_tmp", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'bcolz-'", ")", "col_values_rootdir", "=", "col_rootdir", "+", "'.values'", "col_values_rootdir_tmp", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "'bcolz-'", ")", "# create factor", "carray_factor", "=", "bcolz", ".", "carray", "(", "[", "]", ",", "dtype", "=", "'int64'", ",", "expectedlen", "=", "self", ".", "size", ",", "rootdir", "=", "col_factor_rootdir_tmp", ",", "mode", "=", "'w'", ")", "_", ",", "values", "=", "ctable_ext", ".", "factorize", "(", "self", "[", "col", "]", ",", "labels", "=", "carray_factor", ")", "carray_factor", ".", "flush", "(", ")", "rm_file_or_dir", "(", "col_factor_rootdir", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "move", "(", "col_factor_rootdir_tmp", ",", "col_factor_rootdir", ")", "# create values", "carray_values", "=", "bcolz", ".", "carray", "(", "np", ".", "fromiter", "(", "values", ".", "values", "(", ")", ",", "dtype", "=", "self", "[", "col", "]", ".", "dtype", ")", ",", "rootdir", "=", "col_values_rootdir_tmp", ",", "mode", "=", "'w'", ")", "carray_values", ".", "flush", "(", ")", "rm_file_or_dir", "(", "col_values_rootdir", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "move", "(", "col_values_rootdir_tmp", ",", "col_values_rootdir", ")" ]
Existing todos here are: these should be hidden helper carrays As in: not normal columns that you would normally see as a user The factor (label index) carray is as long as the original carray (and the rest of the table therefore) But the (unique) values carray is not as long (as long as the number of unique values) :param col_list: :param refresh: :return:
[ "Existing", "todos", "here", "are", ":", "these", "should", "be", "hidden", "helper", "carrays", "As", "in", ":", "not", "normal", "columns", "that", "you", "would", "normally", "see", "as", "a", "user" ]
python
train
40.741379
fermiPy/fermipy
fermipy/diffuse/model_manager.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/model_manager.py#L232-L257
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml): """ Build up the library of all the components Parameters ---------- diffuse_yaml : str Name of the yaml file with the library of diffuse component definitions catalog_yaml : str Name of the yaml file width the library of catalog split definitions binning_yaml : str Name of the yaml file with the binning definitions """ ret_dict = {} #catalog_dict = yaml.safe_load(open(catalog_yaml)) components_dict = Component.build_from_yamlfile(binning_yaml) diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm, DiffuseModelManager=self._dmm, library=diffuse_yaml, components=components_dict) catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml, CatalogSourceManager=self._csm) ret_dict.update(diffuse_ret_dict['comp_info_dict']) ret_dict.update(catalog_ret_dict['comp_info_dict']) self._library.update(ret_dict) return ret_dict
[ "def", "make_library", "(", "self", ",", "diffuse_yaml", ",", "catalog_yaml", ",", "binning_yaml", ")", ":", "ret_dict", "=", "{", "}", "#catalog_dict = yaml.safe_load(open(catalog_yaml))", "components_dict", "=", "Component", ".", "build_from_yamlfile", "(", "binning_yaml", ")", "diffuse_ret_dict", "=", "make_diffuse_comp_info_dict", "(", "GalpropMapManager", "=", "self", ".", "_gmm", ",", "DiffuseModelManager", "=", "self", ".", "_dmm", ",", "library", "=", "diffuse_yaml", ",", "components", "=", "components_dict", ")", "catalog_ret_dict", "=", "make_catalog_comp_dict", "(", "library", "=", "catalog_yaml", ",", "CatalogSourceManager", "=", "self", ".", "_csm", ")", "ret_dict", ".", "update", "(", "diffuse_ret_dict", "[", "'comp_info_dict'", "]", ")", "ret_dict", ".", "update", "(", "catalog_ret_dict", "[", "'comp_info_dict'", "]", ")", "self", ".", "_library", ".", "update", "(", "ret_dict", ")", "return", "ret_dict" ]
Build up the library of all the components Parameters ---------- diffuse_yaml : str Name of the yaml file with the library of diffuse component definitions catalog_yaml : str Name of the yaml file width the library of catalog split definitions binning_yaml : str Name of the yaml file with the binning definitions
[ "Build", "up", "the", "library", "of", "all", "the", "components" ]
python
train
49
merll/docker-map
dockermap/client/docker_util.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/client/docker_util.py#L259-L301
def remove_all_containers(self, stop_timeout=10, list_only=False): """ First stops (if necessary) and them removes all containers present on the Docker instance. :param stop_timeout: Timeout to stopping each container. :type stop_timeout: int :param list_only: When set to ``True`` only lists containers, but does not actually stop or remove them. :type list_only: bool :return: A tuple of two lists: Stopped container ids, and removed container ids. :rtype: (list[unicode | str], list[unicode | str]) """ containers = [(container['Id'], container['Status']) for container in self.containers(all=True)] running_containers = [c_id for c_id, status in containers if not (status.startswith('Exited') or status == 'Dead')] if list_only: return running_containers, [c[0] for c in containers] stopped_containers = [] for c_id in running_containers: try: self.stop(c_id, timeout=stop_timeout) except Timeout: log.warning("Container %s did not stop in time - sent SIGKILL.", c_id) try: self.wait(c_id, timeout=stop_timeout) except Timeout: pass except: exc_info = sys.exc_info() raise PartialResultsError(exc_info, (stopped_containers, [])) else: stopped_containers.append(c_id) removed_containers = [] for c_id, __ in containers: try: self.remove_container(c_id) except: exc_info = sys.exc_info() raise PartialResultsError(exc_info, (stopped_containers, removed_containers)) else: removed_containers.append(c_id) return stopped_containers, removed_containers
[ "def", "remove_all_containers", "(", "self", ",", "stop_timeout", "=", "10", ",", "list_only", "=", "False", ")", ":", "containers", "=", "[", "(", "container", "[", "'Id'", "]", ",", "container", "[", "'Status'", "]", ")", "for", "container", "in", "self", ".", "containers", "(", "all", "=", "True", ")", "]", "running_containers", "=", "[", "c_id", "for", "c_id", ",", "status", "in", "containers", "if", "not", "(", "status", ".", "startswith", "(", "'Exited'", ")", "or", "status", "==", "'Dead'", ")", "]", "if", "list_only", ":", "return", "running_containers", ",", "[", "c", "[", "0", "]", "for", "c", "in", "containers", "]", "stopped_containers", "=", "[", "]", "for", "c_id", "in", "running_containers", ":", "try", ":", "self", ".", "stop", "(", "c_id", ",", "timeout", "=", "stop_timeout", ")", "except", "Timeout", ":", "log", ".", "warning", "(", "\"Container %s did not stop in time - sent SIGKILL.\"", ",", "c_id", ")", "try", ":", "self", ".", "wait", "(", "c_id", ",", "timeout", "=", "stop_timeout", ")", "except", "Timeout", ":", "pass", "except", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "raise", "PartialResultsError", "(", "exc_info", ",", "(", "stopped_containers", ",", "[", "]", ")", ")", "else", ":", "stopped_containers", ".", "append", "(", "c_id", ")", "removed_containers", "=", "[", "]", "for", "c_id", ",", "__", "in", "containers", ":", "try", ":", "self", ".", "remove_container", "(", "c_id", ")", "except", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "raise", "PartialResultsError", "(", "exc_info", ",", "(", "stopped_containers", ",", "removed_containers", ")", ")", "else", ":", "removed_containers", ".", "append", "(", "c_id", ")", "return", "stopped_containers", ",", "removed_containers" ]
First stops (if necessary) and them removes all containers present on the Docker instance. :param stop_timeout: Timeout to stopping each container. :type stop_timeout: int :param list_only: When set to ``True`` only lists containers, but does not actually stop or remove them. :type list_only: bool :return: A tuple of two lists: Stopped container ids, and removed container ids. :rtype: (list[unicode | str], list[unicode | str])
[ "First", "stops", "(", "if", "necessary", ")", "and", "them", "removes", "all", "containers", "present", "on", "the", "Docker", "instance", "." ]
python
train
45.116279
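A hedged sketch; `client` stands for a docker-map client object that provides the utility above, and only the call signature and return shape come from the code in this record.
running, present = client.remove_all_containers(list_only=True)   # inspect only, touch nothing
stopped, removed = client.remove_all_containers(stop_timeout=5)    # stop and remove everything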
mbj4668/pyang
pyang/translators/dsdl.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L620-L650
def apply_augments(self, auglist, p_elem, pset): """Handle substatements of augments from `auglist`. The augments are applied in the context of `p_elem`. `pset` is a patch set containing patches that may be applicable to descendants. """ for a in auglist: par = a.parent if a.search_one("when") is None: wel = p_elem else: if p_elem.interleave: kw = "interleave" else: kw = "group" wel = SchemaNode(kw, p_elem, interleave=p_elem.interleave) wel.occur = p_elem.occur if par.keyword == "uses": self.handle_substmts(a, wel, pset) continue if par.keyword == "submodule": mnam = par.i_including_modulename else: mnam = par.arg if self.prefix_stack[-1] == self.module_prefixes[mnam]: self.handle_substmts(a, wel, pset) else: self.prefix_stack.append(self.module_prefixes[mnam]) self.handle_substmts(a, wel, pset) self.prefix_stack.pop()
[ "def", "apply_augments", "(", "self", ",", "auglist", ",", "p_elem", ",", "pset", ")", ":", "for", "a", "in", "auglist", ":", "par", "=", "a", ".", "parent", "if", "a", ".", "search_one", "(", "\"when\"", ")", "is", "None", ":", "wel", "=", "p_elem", "else", ":", "if", "p_elem", ".", "interleave", ":", "kw", "=", "\"interleave\"", "else", ":", "kw", "=", "\"group\"", "wel", "=", "SchemaNode", "(", "kw", ",", "p_elem", ",", "interleave", "=", "p_elem", ".", "interleave", ")", "wel", ".", "occur", "=", "p_elem", ".", "occur", "if", "par", ".", "keyword", "==", "\"uses\"", ":", "self", ".", "handle_substmts", "(", "a", ",", "wel", ",", "pset", ")", "continue", "if", "par", ".", "keyword", "==", "\"submodule\"", ":", "mnam", "=", "par", ".", "i_including_modulename", "else", ":", "mnam", "=", "par", ".", "arg", "if", "self", ".", "prefix_stack", "[", "-", "1", "]", "==", "self", ".", "module_prefixes", "[", "mnam", "]", ":", "self", ".", "handle_substmts", "(", "a", ",", "wel", ",", "pset", ")", "else", ":", "self", ".", "prefix_stack", ".", "append", "(", "self", ".", "module_prefixes", "[", "mnam", "]", ")", "self", ".", "handle_substmts", "(", "a", ",", "wel", ",", "pset", ")", "self", ".", "prefix_stack", ".", "pop", "(", ")" ]
Handle substatements of augments from `auglist`. The augments are applied in the context of `p_elem`. `pset` is a patch set containing patches that may be applicable to descendants.
[ "Handle", "substatements", "of", "augments", "from", "auglist", "." ]
python
train
38.516129
stevelittlefish/littlefish
littlefish/htmlutil.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/htmlutil.py#L40-L85
def split_line(line, min_line_length=30, max_line_length=100): """ This is designed to work with prettified output from Beautiful Soup which indents with a single space. :param line: The line to split :param min_line_length: The minimum desired line length :param max_line_length: The maximum desired line length :return: A list of lines """ if len(line) <= max_line_length: # No need to split! return [line] # First work out the indentation on the beginning of the line indent = 0 while line[indent] == ' ' and indent < len(line): indent += 1 # Try to split the line # Start looking for a space at character max_line_length working backwards i = max_line_length split_point = None while i > min_line_length: if line[i] == ' ': split_point = i break i -= 1 if split_point is None: # We didn't find a split point - search beyond the end of the line i = max_line_length + 1 while i < len(line): if line[i] == ' ': split_point = i break i += 1 if split_point is None: # There is nowhere to split the line! return [line] else: # Split it! line1 = line[:split_point] line2 = ' ' * indent + line[split_point + 1:] return [line1] + split_line(line2, min_line_length, max_line_length)
[ "def", "split_line", "(", "line", ",", "min_line_length", "=", "30", ",", "max_line_length", "=", "100", ")", ":", "if", "len", "(", "line", ")", "<=", "max_line_length", ":", "# No need to split!", "return", "[", "line", "]", "# First work out the indentation on the beginning of the line", "indent", "=", "0", "while", "line", "[", "indent", "]", "==", "' '", "and", "indent", "<", "len", "(", "line", ")", ":", "indent", "+=", "1", "# Try to split the line", "# Start looking for a space at character max_line_length working backwards", "i", "=", "max_line_length", "split_point", "=", "None", "while", "i", ">", "min_line_length", ":", "if", "line", "[", "i", "]", "==", "' '", ":", "split_point", "=", "i", "break", "i", "-=", "1", "if", "split_point", "is", "None", ":", "# We didn't find a split point - search beyond the end of the line", "i", "=", "max_line_length", "+", "1", "while", "i", "<", "len", "(", "line", ")", ":", "if", "line", "[", "i", "]", "==", "' '", ":", "split_point", "=", "i", "break", "i", "+=", "1", "if", "split_point", "is", "None", ":", "# There is nowhere to split the line!", "return", "[", "line", "]", "else", ":", "# Split it!", "line1", "=", "line", "[", ":", "split_point", "]", "line2", "=", "' '", "*", "indent", "+", "line", "[", "split_point", "+", "1", ":", "]", "return", "[", "line1", "]", "+", "split_line", "(", "line2", ",", "min_line_length", ",", "max_line_length", ")" ]
This is designed to work with prettified output from Beautiful Soup which indents with a single space. :param line: The line to split :param min_line_length: The minimum desired line length :param max_line_length: The maximum desired line length :return: A list of lines
[ "This", "is", "designed", "to", "work", "with", "prettified", "output", "from", "Beautiful", "Soup", "which", "indents", "with", "a", "single", "space", "." ]
python
test
30.543478
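A hedged usage sketch; the import path is taken from this record's path field and the input mimics the single-space-indented Beautiful Soup output the docstring describes.
from littlefish.htmlutil import split_line  # assumed importable per the record's path

pretty = ' <p>' + 'lorem ipsum dolor sit amet ' * 5 + '</p>'
for chunk in split_line(pretty, min_line_length=20, max_line_length=60):
    print(chunk)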
agrc/agrc.python
agrc/update.py
https://github.com/agrc/agrc.python/blob/be427e919bd4cdd6f19524b7f7fe18882429c25b/agrc/update.py#L164-L248
def checkForChanges(f, sde, isTable): """ returns False if there are no changes """ # try simple feature count first fCount = int(arcpy.GetCount_management(f).getOutput(0)) sdeCount = int(arcpy.GetCount_management(sde).getOutput(0)) if fCount != sdeCount: return True fields = [fld.name for fld in arcpy.ListFields(f)] # filter out shape fields if not isTable: fields = filter_fields(fields) d = arcpy.Describe(f) shapeType = d.shapeType if shapeType == 'Polygon': shapeToken = 'SHAPE@AREA' elif shapeType == 'Polyline': shapeToken = 'SHAPE@LENGTH' elif shapeType == 'Point': shapeToken = 'SHAPE@XY' else: shapeToken = 'SHAPE@JSON' fields.append(shapeToken) def parseShape(shapeValue): if shapeValue is None: return 0 elif shapeType in ['Polygon', 'Polyline']: return shapeValue elif shapeType == 'Point': if shapeValue[0] is not None and shapeValue[1] is not None: return shapeValue[0] + shapeValue[1] else: return 0 else: return shapeValue outputSR = arcpy.Describe(f).spatialReference else: outputSR = None changed = False with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \ arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'), spatial_reference=outputSR) as sdeCursor: for fRow, sdeRow in izip(fCursor, sdeCursor): if fRow != sdeRow: # check shapes first if fRow[-1] != sdeRow[-1] and not isTable: if shapeType not in ['Polygon', 'Polyline', 'Point']: changed = True break fShape = parseShape(fRow[-1]) sdeShape = parseShape(sdeRow[-1]) try: assert_almost_equal(fShape, sdeShape, -1) # trim off shapes fRow = list(fRow[:-1]) sdeRow = list(sdeRow[:-1]) except AssertionError: changed = True break # trim microseconds since they can be off by one between file and sde databases for i in range(len(fRow)): if type(fRow[i]) is datetime: fRow = list(fRow) sdeRow = list(sdeRow) fRow[i] = fRow[i].replace(microsecond=0) try: sdeRow[i] = sdeRow[i].replace(microsecond=0) except: pass # compare all values except OBJECTID if fRow[1:] != sdeRow[1:]: changed = True break return changed
[ "def", "checkForChanges", "(", "f", ",", "sde", ",", "isTable", ")", ":", "# try simple feature count first", "fCount", "=", "int", "(", "arcpy", ".", "GetCount_management", "(", "f", ")", ".", "getOutput", "(", "0", ")", ")", "sdeCount", "=", "int", "(", "arcpy", ".", "GetCount_management", "(", "sde", ")", ".", "getOutput", "(", "0", ")", ")", "if", "fCount", "!=", "sdeCount", ":", "return", "True", "fields", "=", "[", "fld", ".", "name", "for", "fld", "in", "arcpy", ".", "ListFields", "(", "f", ")", "]", "# filter out shape fields", "if", "not", "isTable", ":", "fields", "=", "filter_fields", "(", "fields", ")", "d", "=", "arcpy", ".", "Describe", "(", "f", ")", "shapeType", "=", "d", ".", "shapeType", "if", "shapeType", "==", "'Polygon'", ":", "shapeToken", "=", "'SHAPE@AREA'", "elif", "shapeType", "==", "'Polyline'", ":", "shapeToken", "=", "'SHAPE@LENGTH'", "elif", "shapeType", "==", "'Point'", ":", "shapeToken", "=", "'SHAPE@XY'", "else", ":", "shapeToken", "=", "'SHAPE@JSON'", "fields", ".", "append", "(", "shapeToken", ")", "def", "parseShape", "(", "shapeValue", ")", ":", "if", "shapeValue", "is", "None", ":", "return", "0", "elif", "shapeType", "in", "[", "'Polygon'", ",", "'Polyline'", "]", ":", "return", "shapeValue", "elif", "shapeType", "==", "'Point'", ":", "if", "shapeValue", "[", "0", "]", "is", "not", "None", "and", "shapeValue", "[", "1", "]", "is", "not", "None", ":", "return", "shapeValue", "[", "0", "]", "+", "shapeValue", "[", "1", "]", "else", ":", "return", "0", "else", ":", "return", "shapeValue", "outputSR", "=", "arcpy", ".", "Describe", "(", "f", ")", ".", "spatialReference", "else", ":", "outputSR", "=", "None", "changed", "=", "False", "with", "arcpy", ".", "da", ".", "SearchCursor", "(", "f", ",", "fields", ",", "sql_clause", "=", "(", "None", ",", "'ORDER BY OBJECTID'", ")", ")", "as", "fCursor", ",", "arcpy", ".", "da", ".", "SearchCursor", "(", "sde", ",", "fields", ",", "sql_clause", "=", "(", "None", ",", "'ORDER BY OBJECTID'", ")", ",", "spatial_reference", "=", "outputSR", ")", "as", "sdeCursor", ":", "for", "fRow", ",", "sdeRow", "in", "izip", "(", "fCursor", ",", "sdeCursor", ")", ":", "if", "fRow", "!=", "sdeRow", ":", "# check shapes first", "if", "fRow", "[", "-", "1", "]", "!=", "sdeRow", "[", "-", "1", "]", "and", "not", "isTable", ":", "if", "shapeType", "not", "in", "[", "'Polygon'", ",", "'Polyline'", ",", "'Point'", "]", ":", "changed", "=", "True", "break", "fShape", "=", "parseShape", "(", "fRow", "[", "-", "1", "]", ")", "sdeShape", "=", "parseShape", "(", "sdeRow", "[", "-", "1", "]", ")", "try", ":", "assert_almost_equal", "(", "fShape", ",", "sdeShape", ",", "-", "1", ")", "# trim off shapes", "fRow", "=", "list", "(", "fRow", "[", ":", "-", "1", "]", ")", "sdeRow", "=", "list", "(", "sdeRow", "[", ":", "-", "1", "]", ")", "except", "AssertionError", ":", "changed", "=", "True", "break", "# trim microseconds since they can be off by one between file and sde databases", "for", "i", "in", "range", "(", "len", "(", "fRow", ")", ")", ":", "if", "type", "(", "fRow", "[", "i", "]", ")", "is", "datetime", ":", "fRow", "=", "list", "(", "fRow", ")", "sdeRow", "=", "list", "(", "sdeRow", ")", "fRow", "[", "i", "]", "=", "fRow", "[", "i", "]", ".", "replace", "(", "microsecond", "=", "0", ")", "try", ":", "sdeRow", "[", "i", "]", "=", "sdeRow", "[", "i", "]", ".", "replace", "(", "microsecond", "=", "0", ")", "except", ":", "pass", "# compare all values except OBJECTID", "if", "fRow", "[", "1", ":", "]", "!=", "sdeRow", "[", "1", ":", "]", 
":", "changed", "=", "True", "break", "return", "changed" ]
returns False if there are no changes
[ "returns", "False", "if", "there", "are", "no", "changes" ]
python
train
35.6
Frojd/Fabrik
fabrik/ext/wpcli.py
https://github.com/Frojd/Fabrik/blob/9f2edbba97a7fd236b72a9b3010f6e912ab5c001/fabrik/ext/wpcli.py#L29-L67
def sync_remote_to_local(force="no"): """ Replace your remote db with your local Example: sync_remote_to_local:force=yes """ assert "local_wp_dir" in env, "Missing local_wp_dir in env" if force != "yes": message = "This will replace your local database with your "\ "remote, are you sure [y/n]" answer = prompt(message, "y") if answer != "y": logger.info("Sync stopped") return init_tasks() # Bootstrap fabrik remote_file = "sync_%s.sql" % int(time.time()*1000) remote_path = "/tmp/%s" % remote_file with env.cd(paths.get_current_path()): env.run("wp db export %s" % remote_path) local_wp_dir = env.local_wp_dir local_path = "/tmp/%s" % remote_file # Download sync file get(remote_path, local_path) with lcd(local_wp_dir): elocal("wp db import %s" % local_path) # Cleanup env.run("rm %s" % remote_path) elocal("rm %s" % local_path)
[ "def", "sync_remote_to_local", "(", "force", "=", "\"no\"", ")", ":", "assert", "\"local_wp_dir\"", "in", "env", ",", "\"Missing local_wp_dir in env\"", "if", "force", "!=", "\"yes\"", ":", "message", "=", "\"This will replace your local database with your \"", "\"remote, are you sure [y/n]\"", "answer", "=", "prompt", "(", "message", ",", "\"y\"", ")", "if", "answer", "!=", "\"y\"", ":", "logger", ".", "info", "(", "\"Sync stopped\"", ")", "return", "init_tasks", "(", ")", "# Bootstrap fabrik", "remote_file", "=", "\"sync_%s.sql\"", "%", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "remote_path", "=", "\"/tmp/%s\"", "%", "remote_file", "with", "env", ".", "cd", "(", "paths", ".", "get_current_path", "(", ")", ")", ":", "env", ".", "run", "(", "\"wp db export %s\"", "%", "remote_path", ")", "local_wp_dir", "=", "env", ".", "local_wp_dir", "local_path", "=", "\"/tmp/%s\"", "%", "remote_file", "# Download sync file", "get", "(", "remote_path", ",", "local_path", ")", "with", "lcd", "(", "local_wp_dir", ")", ":", "elocal", "(", "\"wp db import %s\"", "%", "local_path", ")", "# Cleanup", "env", ".", "run", "(", "\"rm %s\"", "%", "remote_path", ")", "elocal", "(", "\"rm %s\"", "%", "local_path", ")" ]
Replace your remote db with your local Example: sync_remote_to_local:force=yes
[ "Replace", "your", "remote", "db", "with", "your", "local" ]
python
train
24.641026
google/grr
grr/client/grr_response_client/client_actions/admin.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/admin.py#L80-L92
def Run(self, unused_arg): """Run the kill.""" # Send a message back to the service to say that we are about to shutdown. reply = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK) # Queue up the response message, jump the queue. self.SendReply(reply, message_type=rdf_flows.GrrMessage.Type.STATUS) # Give the http thread some time to send the reply. self.grr_worker.Sleep(10) # Die ourselves. logging.info("Dying on request.") os._exit(242)
[ "def", "Run", "(", "self", ",", "unused_arg", ")", ":", "# Send a message back to the service to say that we are about to shutdown.", "reply", "=", "rdf_flows", ".", "GrrStatus", "(", "status", "=", "rdf_flows", ".", "GrrStatus", ".", "ReturnedStatus", ".", "OK", ")", "# Queue up the response message, jump the queue.", "self", ".", "SendReply", "(", "reply", ",", "message_type", "=", "rdf_flows", ".", "GrrMessage", ".", "Type", ".", "STATUS", ")", "# Give the http thread some time to send the reply.", "self", ".", "grr_worker", ".", "Sleep", "(", "10", ")", "# Die ourselves.", "logging", ".", "info", "(", "\"Dying on request.\"", ")", "os", ".", "_exit", "(", "242", ")" ]
Run the kill.
[ "Run", "the", "kill", "." ]
python
train
37.384615
SuLab/WikidataIntegrator
wikidataintegrator/wdi_core.py
https://github.com/SuLab/WikidataIntegrator/blob/8ceb2ed1c08fec070ec9edfcf7db7b8691481b62/wikidataintegrator/wdi_core.py#L831-L854
def set_description(self, description, lang='en'): """ Set the description for a WD item in a certain language :param description: The description of the item in a certain language :type description: str :param lang: The language a description should be set for. :type lang: str :return: None """ if self.fast_run and not self.require_write: self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id, lang_data=[description], lang=lang, lang_data_type='description') if self.require_write: self.init_data_load() else: return if 'descriptions' not in self.wd_json_representation: self.wd_json_representation['descriptions'] = {} self.wd_json_representation['descriptions'][lang] = { 'language': lang, 'value': description }
[ "def", "set_description", "(", "self", ",", "description", ",", "lang", "=", "'en'", ")", ":", "if", "self", ".", "fast_run", "and", "not", "self", ".", "require_write", ":", "self", ".", "require_write", "=", "self", ".", "fast_run_container", ".", "check_language_data", "(", "qid", "=", "self", ".", "wd_item_id", ",", "lang_data", "=", "[", "description", "]", ",", "lang", "=", "lang", ",", "lang_data_type", "=", "'description'", ")", "if", "self", ".", "require_write", ":", "self", ".", "init_data_load", "(", ")", "else", ":", "return", "if", "'descriptions'", "not", "in", "self", ".", "wd_json_representation", ":", "self", ".", "wd_json_representation", "[", "'descriptions'", "]", "=", "{", "}", "self", ".", "wd_json_representation", "[", "'descriptions'", "]", "[", "lang", "]", "=", "{", "'language'", ":", "lang", ",", "'value'", ":", "description", "}" ]
Set the description for a WD item in a certain language :param description: The description of the item in a certain language :type description: str :param lang: The language a description should be set for. :type lang: str :return: None
[ "Set", "the", "description", "for", "a", "WD", "item", "in", "a", "certain", "language", ":", "param", "description", ":", "The", "description", "of", "the", "item", "in", "a", "certain", "language", ":", "type", "description", ":", "str", ":", "param", "lang", ":", "The", "language", "a", "description", "should", "be", "set", "for", ".", ":", "type", "lang", ":", "str", ":", "return", ":", "None" ]
python
train
45.041667
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L3924-L3946
def get_gradebook_ids_by_gradebook_column(self, gradebook_column_id): """Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradebookColumn``. arg: gradebook_column_id (osid.id.Id): ``Id`` of a ``GradebookColumn`` return: (osid.id.IdList) - list of gradebook ``Ids`` raise: NotFound - ``gradebook_column_id`` is not found raise: NullArgument - ``gradebook_column_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bin_ids_by_resource mgr = self._get_provider_manager('GRADING', local=True) lookup_session = mgr.get_gradebook_column_lookup_session(proxy=self._proxy) lookup_session.use_federated_gradebook_view() gradebook_column = lookup_session.get_gradebook_column(gradebook_column_id) id_list = [] for idstr in gradebook_column._my_map['assignedGradebookIds']: id_list.append(Id(idstr)) return IdList(id_list)
[ "def", "get_gradebook_ids_by_gradebook_column", "(", "self", ",", "gradebook_column_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_bin_ids_by_resource", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'GRADING'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_gradebook_column_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "lookup_session", ".", "use_federated_gradebook_view", "(", ")", "gradebook_column", "=", "lookup_session", ".", "get_gradebook_column", "(", "gradebook_column_id", ")", "id_list", "=", "[", "]", "for", "idstr", "in", "gradebook_column", ".", "_my_map", "[", "'assignedGradebookIds'", "]", ":", "id_list", ".", "append", "(", "Id", "(", "idstr", ")", ")", "return", "IdList", "(", "id_list", ")" ]
Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradebookColumn``. arg: gradebook_column_id (osid.id.Id): ``Id`` of a ``GradebookColumn`` return: (osid.id.IdList) - list of gradebook ``Ids`` raise: NotFound - ``gradebook_column_id`` is not found raise: NullArgument - ``gradebook_column_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Gradebook", "Ids", "mapped", "to", "a", "GradebookColumn", "." ]
python
train
51.086957
ckcollab/polished
polished/decorators.py
https://github.com/ckcollab/polished/blob/5a00b2fbe569bc957d1647c0849fd344db29b644/polished/decorators.py#L5-L28
def polish(commit_indexes=None, urls=None): ''' Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots For example, if you have 10 commits in a row where static file links were broken, you could re-write the html in memory as it is interpreted. Keyword arguments: commit_indexes -- A list of indexes to apply the wrapped function to url -- A list of URLs to apply the wrapped function to ''' def decorator(f): if commit_indexes: f.polish_commit_indexes = commit_indexes if urls: f.polish_urls = urls @wraps(f) def wrappee(*args, **kwargs): return f(*args, **kwargs) return wrappee return decorator
[ "def", "polish", "(", "commit_indexes", "=", "None", ",", "urls", "=", "None", ")", ":", "def", "decorator", "(", "f", ")", ":", "if", "commit_indexes", ":", "f", ".", "polish_commit_indexes", "=", "commit_indexes", "if", "urls", ":", "f", ".", "polish_urls", "=", "urls", "@", "wraps", "(", "f", ")", "def", "wrappee", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrappee", "return", "decorator" ]
Apply certain behaviors to commits or URLs that need polishing before they are ready for screenshots For example, if you have 10 commits in a row where static file links were broken, you could re-write the html in memory as it is interpreted. Keyword arguments: commit_indexes -- A list of indexes to apply the wrapped function to url -- A list of URLs to apply the wrapped function to
[ "Apply", "certain", "behaviors", "to", "commits", "or", "URLs", "that", "need", "polishing", "before", "they", "are", "ready", "for", "screenshots" ]
python
train
30.875
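Illustrative usage of the polish() decorator from the record above (an editorial sketch, not part of the dataset). It assumes polish is importable from polished.decorators, matching the record's path; the URL, commit indexes, and the fix_broken_static_links function are invented. Because the decorator only attaches metadata and wraps the function, the sketch runs without the rest of the polished pipeline.

from polished.decorators import polish

@polish(commit_indexes=[0, 1], urls=["/about/"])
def fix_broken_static_links(html):
    # Rewrite the served HTML before the screenshot is taken.
    return html.replace("/static_old/", "/static/")

# The decorator simply records where the fix should apply:
print(fix_broken_static_links.polish_commit_indexes)  # [0, 1]
print(fix_broken_static_links.polish_urls)             # ['/about/']
print(fix_broken_static_links("<img src='/static_old/a.png'>"))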
pantsbuild/pants
src/python/pants/reporting/reporting_server.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/reporting/reporting_server.py#L233-L256
def _partition_runs_by_day(self): """Split the runs by day, so we can display them grouped that way.""" run_infos = self._get_all_run_infos() for x in run_infos: ts = float(x['timestamp']) x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S') def date_text(dt): delta_days = (date.today() - dt).days if delta_days == 0: return 'Today' elif delta_days == 1: return 'Yesterday' elif delta_days < 7: return dt.strftime('%A') # Weekday name. else: d = dt.day % 10 suffix = 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th' return dt.strftime('%B %d') + suffix # E.g., October 30th. keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp'])) sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True) return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]} for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())]
[ "def", "_partition_runs_by_day", "(", "self", ")", ":", "run_infos", "=", "self", ".", "_get_all_run_infos", "(", ")", "for", "x", "in", "run_infos", ":", "ts", "=", "float", "(", "x", "[", "'timestamp'", "]", ")", "x", "[", "'time_of_day_text'", "]", "=", "datetime", ".", "fromtimestamp", "(", "ts", ")", ".", "strftime", "(", "'%H:%M:%S'", ")", "def", "date_text", "(", "dt", ")", ":", "delta_days", "=", "(", "date", ".", "today", "(", ")", "-", "dt", ")", ".", "days", "if", "delta_days", "==", "0", ":", "return", "'Today'", "elif", "delta_days", "==", "1", ":", "return", "'Yesterday'", "elif", "delta_days", "<", "7", ":", "return", "dt", ".", "strftime", "(", "'%A'", ")", "# Weekday name.", "else", ":", "d", "=", "dt", ".", "day", "%", "10", "suffix", "=", "'st'", "if", "d", "==", "1", "else", "'nd'", "if", "d", "==", "2", "else", "'rd'", "if", "d", "==", "3", "else", "'th'", "return", "dt", ".", "strftime", "(", "'%B %d'", ")", "+", "suffix", "# E.g., October 30th.", "keyfunc", "=", "lambda", "x", ":", "datetime", ".", "fromtimestamp", "(", "float", "(", "x", "[", "'timestamp'", "]", ")", ")", "sorted_run_infos", "=", "sorted", "(", "run_infos", ",", "key", "=", "keyfunc", ",", "reverse", "=", "True", ")", "return", "[", "{", "'date_text'", ":", "date_text", "(", "dt", ")", ",", "'run_infos'", ":", "[", "x", "for", "x", "in", "infos", "]", "}", "for", "dt", ",", "infos", "in", "itertools", ".", "groupby", "(", "sorted_run_infos", ",", "lambda", "x", ":", "keyfunc", "(", "x", ")", ".", "date", "(", ")", ")", "]" ]
Split the runs by day, so we can display them grouped that way.
[ "Split", "the", "runs", "by", "day", "so", "we", "can", "display", "them", "grouped", "that", "way", "." ]
python
train
41.833333
Alignak-monitoring/alignak
alignak/objects/service.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/service.py#L1257-L1273
def get_short_status(self, hosts, services): """Get the short status of this host :return: "O", "W", "C", "U", or "n/a" based on service state_id or business_rule state :rtype: str """ mapping = { 0: "O", 1: "W", 2: "C", 3: "U", 4: "N", } if self.got_business_rule: return mapping.get(self.business_rule.get_state(hosts, services), "n/a") return mapping.get(self.state_id, "n/a")
[ "def", "get_short_status", "(", "self", ",", "hosts", ",", "services", ")", ":", "mapping", "=", "{", "0", ":", "\"O\"", ",", "1", ":", "\"W\"", ",", "2", ":", "\"C\"", ",", "3", ":", "\"U\"", ",", "4", ":", "\"N\"", ",", "}", "if", "self", ".", "got_business_rule", ":", "return", "mapping", ".", "get", "(", "self", ".", "business_rule", ".", "get_state", "(", "hosts", ",", "services", ")", ",", "\"n/a\"", ")", "return", "mapping", ".", "get", "(", "self", ".", "state_id", ",", "\"n/a\"", ")" ]
Get the short status of this host :return: "O", "W", "C", "U", or "n/a" based on service state_id or business_rule state :rtype: str
[ "Get", "the", "short", "status", "of", "this", "host" ]
python
train
29.470588
mozilla/taar
taar/recommenders/lazys3.py
https://github.com/mozilla/taar/blob/4002eb395f0b7ad837f1578e92d590e2cf82bdca/taar/recommenders/lazys3.py#L43-L54
def get(self, transform=None): """ Return the JSON defined at the S3 location in the constructor. The get method will reload the S3 object after the TTL has expired. Fetch the JSON object from cache or S3 if necessary """ if not self.has_expired() and self._cached_copy is not None: return self._cached_copy, False return self._refresh_cache(transform), True
[ "def", "get", "(", "self", ",", "transform", "=", "None", ")", ":", "if", "not", "self", ".", "has_expired", "(", ")", "and", "self", ".", "_cached_copy", "is", "not", "None", ":", "return", "self", ".", "_cached_copy", ",", "False", "return", "self", ".", "_refresh_cache", "(", "transform", ")", ",", "True" ]
Return the JSON defined at the S3 location in the constructor. The get method will reload the S3 object after the TTL has expired. Fetch the JSON object from cache or S3 if necessary
[ "Return", "the", "JSON", "defined", "at", "the", "S3", "location", "in", "the", "constructor", "." ]
python
train
35.416667
openstack/proliantutils
proliantutils/ilo/client.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/client.py#L330-L353
def set_iscsi_boot_info(self, mac, target_name, lun, ip_address, port='3260', auth_method=None, username=None, password=None): """Set iscsi details of the system in uefi boot mode. The initiator system is set with the target details like IQN, LUN, IP, Port etc. :param mac: The MAC of the NIC to be set with iSCSI information :param target_name: Target Name for iscsi. :param lun: logical unit number. :param ip_address: IP address of the target. :param port: port of the target. :param auth_method : either None or CHAP. :param username: CHAP Username for authentication. :param password: CHAP secret. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode. """ LOG.warning("'set_iscsi_boot_info' is deprecated. The 'MAC' parameter" "passed in is ignored. Use 'set_iscsi_info' instead.") return self._call_method('set_iscsi_info', target_name, lun, ip_address, port, auth_method, username, password)
[ "def", "set_iscsi_boot_info", "(", "self", ",", "mac", ",", "target_name", ",", "lun", ",", "ip_address", ",", "port", "=", "'3260'", ",", "auth_method", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "LOG", ".", "warning", "(", "\"'set_iscsi_boot_info' is deprecated. The 'MAC' parameter\"", "\"passed in is ignored. Use 'set_iscsi_info' instead.\"", ")", "return", "self", ".", "_call_method", "(", "'set_iscsi_info'", ",", "target_name", ",", "lun", ",", "ip_address", ",", "port", ",", "auth_method", ",", "username", ",", "password", ")" ]
Set iscsi details of the system in uefi boot mode. The initiator system is set with the target details like IQN, LUN, IP, Port etc. :param mac: The MAC of the NIC to be set with iSCSI information :param target_name: Target Name for iscsi. :param lun: logical unit number. :param ip_address: IP address of the target. :param port: port of the target. :param auth_method : either None or CHAP. :param username: CHAP Username for authentication. :param password: CHAP secret. :raises: IloError, on an error from iLO. :raises: IloCommandNotSupportedInBiosError, if the system is in the bios boot mode.
[ "Set", "iscsi", "details", "of", "the", "system", "in", "uefi", "boot", "mode", "." ]
python
train
51.416667
projecthamster/hamster
src/hamster/lib/stuff.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/stuff.py#L224-L234
def totals(iter, keyfunc, sumfunc): """groups items by field described in keyfunc and counts totals using value from sumfunc """ data = sorted(iter, key=keyfunc) res = {} for k, group in groupby(data, keyfunc): res[k] = sum([sumfunc(entry) for entry in group]) return res
[ "def", "totals", "(", "iter", ",", "keyfunc", ",", "sumfunc", ")", ":", "data", "=", "sorted", "(", "iter", ",", "key", "=", "keyfunc", ")", "res", "=", "{", "}", "for", "k", ",", "group", "in", "groupby", "(", "data", ",", "keyfunc", ")", ":", "res", "[", "k", "]", "=", "sum", "(", "[", "sumfunc", "(", "entry", ")", "for", "entry", "in", "group", "]", ")", "return", "res" ]
groups items by field described in keyfunc and counts totals using value from sumfunc
[ "groups", "items", "by", "field", "described", "in", "keyfunc", "and", "counts", "totals", "using", "value", "from", "sumfunc" ]
python
train
27.454545
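A minimal editorial usage sketch for totals() from the record above (not part of the dataset; the fact tuples are invented, and totals is assumed importable from hamster.lib.stuff per the record's path). It groups the items by the key function and sums the extracted values per group.

from hamster.lib.stuff import totals

facts = [("email", 10), ("email", 5), ("meeting", 30)]
by_activity = totals(facts, keyfunc=lambda f: f[0], sumfunc=lambda f: f[1])
print(by_activity)  # {'email': 15, 'meeting': 30}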
matllubos/django-is-core
is_core/forms/widgets.py
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/forms/widgets.py#L412-L420
def is_restricted(self): """ Returns True or False according to number of objects in queryset. If queryset contains too much objects the widget will be restricted and won't be used select box with choices. """ return ( not hasattr(self.choices, 'queryset') or self.choices.queryset.count() > settings.FOREIGN_KEY_MAX_SELECBOX_ENTRIES )
[ "def", "is_restricted", "(", "self", ")", ":", "return", "(", "not", "hasattr", "(", "self", ".", "choices", ",", "'queryset'", ")", "or", "self", ".", "choices", ".", "queryset", ".", "count", "(", ")", ">", "settings", ".", "FOREIGN_KEY_MAX_SELECBOX_ENTRIES", ")" ]
Returns True or False according to number of objects in queryset. If queryset contains too much objects the widget will be restricted and won't be used select box with choices.
[ "Returns", "True", "or", "False", "according", "to", "number", "of", "objects", "in", "queryset", ".", "If", "queryset", "contains", "too", "much", "objects", "the", "widget", "will", "be", "restricted", "and", "won", "t", "be", "used", "select", "box", "with", "choices", "." ]
python
train
44.333333
gem/oq-engine
openquake/hazardlib/geo/point.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/point.py#L122-L137
def azimuth(self, point): """ Compute the azimuth (in decimal degrees) between this point and the given point. :param point: Destination point. :type point: Instance of :class:`Point` :returns: The azimuth, value in a range ``[0, 360)``. :rtype: float """ return geodetic.azimuth(self.longitude, self.latitude, point.longitude, point.latitude)
[ "def", "azimuth", "(", "self", ",", "point", ")", ":", "return", "geodetic", ".", "azimuth", "(", "self", ".", "longitude", ",", "self", ".", "latitude", ",", "point", ".", "longitude", ",", "point", ".", "latitude", ")" ]
Compute the azimuth (in decimal degrees) between this point and the given point. :param point: Destination point. :type point: Instance of :class:`Point` :returns: The azimuth, value in a range ``[0, 360)``. :rtype: float
[ "Compute", "the", "azimuth", "(", "in", "decimal", "degrees", ")", "between", "this", "point", "and", "the", "given", "point", "." ]
python
train
30.0625
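Editorial usage sketch for Point.azimuth() from the record above (not in the dataset). It assumes the hazardlib Point constructor takes longitude and latitude in decimal degrees, which this record does not show.

from openquake.hazardlib.geo.point import Point

p1 = Point(0.0, 0.0)    # longitude, latitude
p2 = Point(0.0, 10.0)   # a point due north of p1
print(p1.azimuth(p2))   # expected to be approximately 0.0 (north)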
gpoulter/fablib
fablib.py
https://github.com/gpoulter/fablib/blob/5d14c4d998f79dd1aa3207063c3d06e30e3e2bf9/fablib.py#L280-L288
def write_version(path, ref=None): """Update version file using git describe""" with lcd(dirname(path)): version = make_version(ref) if (env.get('full') or not os.path.exists(path) or version != open(path).read().strip()): with open(path, 'w') as out: out.write(version + '\n') return version
[ "def", "write_version", "(", "path", ",", "ref", "=", "None", ")", ":", "with", "lcd", "(", "dirname", "(", "path", ")", ")", ":", "version", "=", "make_version", "(", "ref", ")", "if", "(", "env", ".", "get", "(", "'full'", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "path", ")", "or", "version", "!=", "open", "(", "path", ")", ".", "read", "(", ")", ".", "strip", "(", ")", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "version", "+", "'\\n'", ")", "return", "version" ]
Update version file using git describe
[ "Update", "version", "file", "using", "git", "describe" ]
python
train
37.666667
odlgroup/odl
odl/space/base_tensors.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/base_tensors.py#L179-L190
def real_space(self): """The space corresponding to this space's `real_dtype`. Raises ------ ValueError If `dtype` is not a numeric data type. """ if not is_numeric_dtype(self.dtype): raise ValueError( '`real_space` not defined for non-numeric `dtype`') return self.astype(self.real_dtype)
[ "def", "real_space", "(", "self", ")", ":", "if", "not", "is_numeric_dtype", "(", "self", ".", "dtype", ")", ":", "raise", "ValueError", "(", "'`real_space` not defined for non-numeric `dtype`'", ")", "return", "self", ".", "astype", "(", "self", ".", "real_dtype", ")" ]
The space corresponding to this space's `real_dtype`. Raises ------ ValueError If `dtype` is not a numeric data type.
[ "The", "space", "corresponding", "to", "this", "space", "s", "real_dtype", "." ]
python
train
31.25
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L305-L322
def _ast_to_code(self, node, **kwargs): """Convert an abstract syntax tree to python source code.""" if isinstance(node, OptreeNode): return self._ast_optree_node_to_code(node, **kwargs) elif isinstance(node, Identifier): return self._ast_identifier_to_code(node, **kwargs) elif isinstance(node, Terminal): return self._ast_terminal_to_code(node, **kwargs) elif isinstance(node, OptionGroup): return self._ast_option_group_to_code(node, **kwargs) elif isinstance(node, RepetitionGroup): return self._ast_repetition_group_to_code(node, **kwargs) elif isinstance(node, SpecialHandling): return self._ast_special_handling_to_code(node, **kwargs) elif isinstance(node, Number): return self._ast_number_to_code(node, **kwargs) else: raise Exception("Unhandled ast node: {0}".format(node))
[ "def", "_ast_to_code", "(", "self", ",", "node", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "node", ",", "OptreeNode", ")", ":", "return", "self", ".", "_ast_optree_node_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "node", ",", "Identifier", ")", ":", "return", "self", ".", "_ast_identifier_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "node", ",", "Terminal", ")", ":", "return", "self", ".", "_ast_terminal_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "node", ",", "OptionGroup", ")", ":", "return", "self", ".", "_ast_option_group_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "node", ",", "RepetitionGroup", ")", ":", "return", "self", ".", "_ast_repetition_group_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "node", ",", "SpecialHandling", ")", ":", "return", "self", ".", "_ast_special_handling_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "node", ",", "Number", ")", ":", "return", "self", ".", "_ast_number_to_code", "(", "node", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "Exception", "(", "\"Unhandled ast node: {0}\"", ".", "format", "(", "node", ")", ")" ]
Convert an abstract syntax tree to python source code.
[ "Convert", "an", "abstract", "syntax", "tree", "to", "python", "source", "code", "." ]
python
test
47.222222
ourway/auth
auth/CAS/authorization.py
https://github.com/ourway/auth/blob/f0d9676854dcec494add4fa086a9b2a3e4d8cea5/auth/CAS/authorization.py#L62-L66
def get_role_members(self, role): """get permissions of a user""" targetRoleDb = AuthGroup.objects(creator=self.client, role=role) members = AuthMembership.objects(groups__in=targetRoleDb).only('user') return json.loads(members.to_json())
[ "def", "get_role_members", "(", "self", ",", "role", ")", ":", "targetRoleDb", "=", "AuthGroup", ".", "objects", "(", "creator", "=", "self", ".", "client", ",", "role", "=", "role", ")", "members", "=", "AuthMembership", ".", "objects", "(", "groups__in", "=", "targetRoleDb", ")", ".", "only", "(", "'user'", ")", "return", "json", ".", "loads", "(", "members", ".", "to_json", "(", ")", ")" ]
get permissions of a user
[ "get", "permissions", "of", "a", "user" ]
python
train
53.2
MacHu-GWU/single_file_module-project
sfm/dtree.py
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/dtree.py#L131-L142
def load(cls, path): """ load DictTree from json files. """ try: with open(path, "rb") as f: return cls(__data__=json.loads(f.read().decode("utf-8"))) except: pass with open(path, "rb") as f: return cls(__data__=pickle.load(f))
[ "def", "load", "(", "cls", ",", "path", ")", ":", "try", ":", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "return", "cls", "(", "__data__", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", ")", "except", ":", "pass", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "return", "cls", "(", "__data__", "=", "pickle", ".", "load", "(", "f", ")", ")" ]
load DictTree from json files.
[ "load", "DictTree", "from", "json", "files", "." ]
python
train
26.416667
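Editorial round-trip sketch for DictTree.load() from the record above (not part of the dataset; the temporary file path and payload are invented, and DictTree is assumed importable from sfm.dtree per the record's path). A plain JSON file is written by hand and then read back through the JSON branch of load().

import json
from sfm.dtree import DictTree

with open("/tmp/dtree_example.json", "w") as f:
    json.dump({"root": {"child": 1}}, f)

tree = DictTree.load("/tmp/dtree_example.json")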
rtfd/sphinxcontrib-dotnetdomain
sphinxcontrib/dotnetdomain.py
https://github.com/rtfd/sphinxcontrib-dotnetdomain/blob/fbc6a81b9993dc5d06866c4483593421b53b9a61/sphinxcontrib/dotnetdomain.py#L152-L208
def handle_signature(self, sig, signode): """Parses out pieces from construct signatures Parses out prefix and argument list from construct definition. This is assuming that the .NET languages this will support will be in a common format, such as:: Namespace.Class.method(argument, argument, ...) The namespace and class will be determined by the nesting of rST directives. Returns Altered :py:data:`signode` with attributes corrected for rST nesting/etc """ try: sig = self.parse_signature(sig.strip()) except ValueError: self.env.warn(self.env.docname, 'Parsing signature failed: "{}"'.format(sig), self.lineno) raise prefix = self.env.ref_context.get('dn:prefix', None) if prefix is not None: sig.prefix = prefix signode['object'] = sig.member signode['prefix'] = sig.prefix signode['fullname'] = sig.full_name() # Prefix modifiers if self.display_prefix: signode += addnodes.desc_annotation(self.display_prefix, self.display_prefix) for prefix in ['public', 'protected', 'static']: if prefix in self.options: signode += addnodes.desc_annotation(prefix + ' ', prefix + ' ') # Show prefix only on shorter declarations if sig.prefix is not None and not self.has_arguments: signode += addnodes.desc_addname(sig.prefix + '.', sig.prefix + '.') signode += addnodes.desc_name(sig.member, sig.member) if self.has_arguments: if not sig.arguments: signode += addnodes.desc_parameterlist() else: # TODO replace this _pseudo_parse_arglist(signode, ', '.join(sig.arguments)) if isinstance(self, DotNetObjectNested): return sig.full_name(), sig.full_name() return sig.full_name(), sig.prefix
[ "def", "handle_signature", "(", "self", ",", "sig", ",", "signode", ")", ":", "try", ":", "sig", "=", "self", ".", "parse_signature", "(", "sig", ".", "strip", "(", ")", ")", "except", "ValueError", ":", "self", ".", "env", ".", "warn", "(", "self", ".", "env", ".", "docname", ",", "'Parsing signature failed: \"{}\"'", ".", "format", "(", "sig", ")", ",", "self", ".", "lineno", ")", "raise", "prefix", "=", "self", ".", "env", ".", "ref_context", ".", "get", "(", "'dn:prefix'", ",", "None", ")", "if", "prefix", "is", "not", "None", ":", "sig", ".", "prefix", "=", "prefix", "signode", "[", "'object'", "]", "=", "sig", ".", "member", "signode", "[", "'prefix'", "]", "=", "sig", ".", "prefix", "signode", "[", "'fullname'", "]", "=", "sig", ".", "full_name", "(", ")", "# Prefix modifiers", "if", "self", ".", "display_prefix", ":", "signode", "+=", "addnodes", ".", "desc_annotation", "(", "self", ".", "display_prefix", ",", "self", ".", "display_prefix", ")", "for", "prefix", "in", "[", "'public'", ",", "'protected'", ",", "'static'", "]", ":", "if", "prefix", "in", "self", ".", "options", ":", "signode", "+=", "addnodes", ".", "desc_annotation", "(", "prefix", "+", "' '", ",", "prefix", "+", "' '", ")", "# Show prefix only on shorter declarations", "if", "sig", ".", "prefix", "is", "not", "None", "and", "not", "self", ".", "has_arguments", ":", "signode", "+=", "addnodes", ".", "desc_addname", "(", "sig", ".", "prefix", "+", "'.'", ",", "sig", ".", "prefix", "+", "'.'", ")", "signode", "+=", "addnodes", ".", "desc_name", "(", "sig", ".", "member", ",", "sig", ".", "member", ")", "if", "self", ".", "has_arguments", ":", "if", "not", "sig", ".", "arguments", ":", "signode", "+=", "addnodes", ".", "desc_parameterlist", "(", ")", "else", ":", "# TODO replace this", "_pseudo_parse_arglist", "(", "signode", ",", "', '", ".", "join", "(", "sig", ".", "arguments", ")", ")", "if", "isinstance", "(", "self", ",", "DotNetObjectNested", ")", ":", "return", "sig", ".", "full_name", "(", ")", ",", "sig", ".", "full_name", "(", ")", "return", "sig", ".", "full_name", "(", ")", ",", "sig", ".", "prefix" ]
Parses out pieces from construct signatures Parses out prefix and argument list from construct definition. This is assuming that the .NET languages this will support will be in a common format, such as:: Namespace.Class.method(argument, argument, ...) The namespace and class will be determined by the nesting of rST directives. Returns Altered :py:data:`signode` with attributes corrected for rST nesting/etc
[ "Parses", "out", "pieces", "from", "construct", "signatures" ]
python
train
36.842105
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/_protocol/leaser.py#L92-L159
def maintain_leases(self): """Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats. """ while self._manager.is_active and not self._stop_event.is_set(): # Determine the appropriate duration for the lease. This is # based off of how long previous messages have taken to ack, with # a sensible default and within the ranges allowed by Pub/Sub. p99 = self._manager.ack_histogram.percentile(99) _LOGGER.debug("The current p99 value is %d seconds.", p99) # Make a copy of the leased messages. This is needed because it's # possible for another thread to modify the dictionary while # we're iterating over it. leased_messages = copy.copy(self._leased_messages) # Drop any leases that are well beyond max lease time. This # ensures that in the event of a badly behaving actor, we can # drop messages and allow Pub/Sub to resend them. cutoff = time.time() - self._manager.flow_control.max_lease_duration to_drop = [ requests.DropRequest(ack_id, item.size) for ack_id, item in six.iteritems(leased_messages) if item.added_time < cutoff ] if to_drop: _LOGGER.warning( "Dropping %s items because they were leased too long.", len(to_drop) ) self._manager.dispatcher.drop(to_drop) # Remove dropped items from our copy of the leased messages (they # have already been removed from the real one by # self._manager.drop(), which calls self.remove()). for item in to_drop: leased_messages.pop(item.ack_id) # Create a streaming pull request. # We do not actually call `modify_ack_deadline` over and over # because it is more efficient to make a single request. ack_ids = leased_messages.keys() if ack_ids: _LOGGER.debug("Renewing lease for %d ack IDs.", len(ack_ids)) # NOTE: This may not work as expected if ``consumer.active`` # has changed since we checked it. An implementation # without any sort of race condition would require a # way for ``send_request`` to fail when the consumer # is inactive. self._manager.dispatcher.modify_ack_deadline( [requests.ModAckRequest(ack_id, p99) for ack_id in ack_ids] ) # Now wait an appropriate period of time and do this again. # # We determine the appropriate period of time based on a random # period between 0 seconds and 90% of the lease. This use of # jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases # where there are many clients. snooze = random.uniform(0.0, p99 * 0.9) _LOGGER.debug("Snoozing lease management for %f seconds.", snooze) self._stop_event.wait(timeout=snooze) _LOGGER.info("%s exiting.", _LEASE_WORKER_NAME)
[ "def", "maintain_leases", "(", "self", ")", ":", "while", "self", ".", "_manager", ".", "is_active", "and", "not", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "# Determine the appropriate duration for the lease. This is", "# based off of how long previous messages have taken to ack, with", "# a sensible default and within the ranges allowed by Pub/Sub.", "p99", "=", "self", ".", "_manager", ".", "ack_histogram", ".", "percentile", "(", "99", ")", "_LOGGER", ".", "debug", "(", "\"The current p99 value is %d seconds.\"", ",", "p99", ")", "# Make a copy of the leased messages. This is needed because it's", "# possible for another thread to modify the dictionary while", "# we're iterating over it.", "leased_messages", "=", "copy", ".", "copy", "(", "self", ".", "_leased_messages", ")", "# Drop any leases that are well beyond max lease time. This", "# ensures that in the event of a badly behaving actor, we can", "# drop messages and allow Pub/Sub to resend them.", "cutoff", "=", "time", ".", "time", "(", ")", "-", "self", ".", "_manager", ".", "flow_control", ".", "max_lease_duration", "to_drop", "=", "[", "requests", ".", "DropRequest", "(", "ack_id", ",", "item", ".", "size", ")", "for", "ack_id", ",", "item", "in", "six", ".", "iteritems", "(", "leased_messages", ")", "if", "item", ".", "added_time", "<", "cutoff", "]", "if", "to_drop", ":", "_LOGGER", ".", "warning", "(", "\"Dropping %s items because they were leased too long.\"", ",", "len", "(", "to_drop", ")", ")", "self", ".", "_manager", ".", "dispatcher", ".", "drop", "(", "to_drop", ")", "# Remove dropped items from our copy of the leased messages (they", "# have already been removed from the real one by", "# self._manager.drop(), which calls self.remove()).", "for", "item", "in", "to_drop", ":", "leased_messages", ".", "pop", "(", "item", ".", "ack_id", ")", "# Create a streaming pull request.", "# We do not actually call `modify_ack_deadline` over and over", "# because it is more efficient to make a single request.", "ack_ids", "=", "leased_messages", ".", "keys", "(", ")", "if", "ack_ids", ":", "_LOGGER", ".", "debug", "(", "\"Renewing lease for %d ack IDs.\"", ",", "len", "(", "ack_ids", ")", ")", "# NOTE: This may not work as expected if ``consumer.active``", "# has changed since we checked it. An implementation", "# without any sort of race condition would require a", "# way for ``send_request`` to fail when the consumer", "# is inactive.", "self", ".", "_manager", ".", "dispatcher", ".", "modify_ack_deadline", "(", "[", "requests", ".", "ModAckRequest", "(", "ack_id", ",", "p99", ")", "for", "ack_id", "in", "ack_ids", "]", ")", "# Now wait an appropriate period of time and do this again.", "#", "# We determine the appropriate period of time based on a random", "# period between 0 seconds and 90% of the lease. This use of", "# jitter (http://bit.ly/2s2ekL7) helps decrease contention in cases", "# where there are many clients.", "snooze", "=", "random", ".", "uniform", "(", "0.0", ",", "p99", "*", "0.9", ")", "_LOGGER", ".", "debug", "(", "\"Snoozing lease management for %f seconds.\"", ",", "snooze", ")", "self", ".", "_stop_event", ".", "wait", "(", "timeout", "=", "snooze", ")", "_LOGGER", ".", "info", "(", "\"%s exiting.\"", ",", "_LEASE_WORKER_NAME", ")" ]
Maintain all of the leases being managed. This method modifies the ack deadline for all of the managed ack IDs, then waits for most of that time (but with jitter), and repeats.
[ "Maintain", "all", "of", "the", "leases", "being", "managed", "." ]
python
train
48.602941
rigetti/pyquil
pyquil/noise.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/noise.py#L154-L165
def from_dict(d): """ Re-create the noise model from a dictionary representation. :param Dict[str,Any] d: The dictionary representation. :return: The restored noise model. :rtype: NoiseModel """ return NoiseModel( gates=[KrausModel.from_dict(t) for t in d["gates"]], assignment_probs={int(qid): np.array(a) for qid, a in d["assignment_probs"].items()}, )
[ "def", "from_dict", "(", "d", ")", ":", "return", "NoiseModel", "(", "gates", "=", "[", "KrausModel", ".", "from_dict", "(", "t", ")", "for", "t", "in", "d", "[", "\"gates\"", "]", "]", ",", "assignment_probs", "=", "{", "int", "(", "qid", ")", ":", "np", ".", "array", "(", "a", ")", "for", "qid", ",", "a", "in", "d", "[", "\"assignment_probs\"", "]", ".", "items", "(", ")", "}", ",", ")" ]
Re-create the noise model from a dictionary representation. :param Dict[str,Any] d: The dictionary representation. :return: The restored noise model. :rtype: NoiseModel
[ "Re", "-", "create", "the", "noise", "model", "from", "a", "dictionary", "representation", "." ]
python
train
36
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L627-L635
def eof(): '''Parser EOF flag of a string.''' @Parser def eof_parser(text, index=0): if index >= len(text): return Value.success(index, None) else: return Value.failure(index, 'EOF') return eof_parser
[ "def", "eof", "(", ")", ":", "@", "Parser", "def", "eof_parser", "(", "text", ",", "index", "=", "0", ")", ":", "if", "index", ">=", "len", "(", "text", ")", ":", "return", "Value", ".", "success", "(", "index", ",", "None", ")", "else", ":", "return", "Value", ".", "failure", "(", "index", ",", "'EOF'", ")", "return", "eof_parser" ]
Parser EOF flag of a string.
[ "Parser", "EOF", "flag", "of", "a", "string", "." ]
python
train
27.555556
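Editorial sketch for the eof() combinator from the record above (not in the dataset). It assumes parsec.py's usual Parser.parse() behaviour, where a failed parse raises ParseError.

from parsec import eof, ParseError

print(eof().parse(""))        # None: empty input is already at end-of-file
try:
    eof().parse("leftover")   # fails: there is still unconsumed input
except ParseError as exc:
    print("not at EOF:", exc)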
timkpaine/pyEX
pyEX/marketdata/http.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/marketdata/http.py#L5-L22
def tops(symbols=None, token='', version=''): '''TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book. TOPS is ideal for developers needing both quote and trade data. https://iexcloud.io/docs/api/#tops Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result ''' symbols = _strToList(symbols) if symbols: return _getJson('tops?symbols=' + ','.join(symbols) + '%2b', token, version) return _getJson('tops', token, version)
[ "def", "tops", "(", "symbols", "=", "None", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "symbols", "=", "_strToList", "(", "symbols", ")", "if", "symbols", ":", "return", "_getJson", "(", "'tops?symbols='", "+", "','", ".", "join", "(", "symbols", ")", "+", "'%2b'", ",", "token", ",", "version", ")", "return", "_getJson", "(", "'tops'", ",", "token", ",", "version", ")" ]
TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book. TOPS is ideal for developers needing both quote and trade data. https://iexcloud.io/docs/api/#tops Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: dict: result
[ "TOPS", "provides", "IEX’s", "aggregated", "best", "quoted", "bid", "and", "offer", "position", "in", "near", "real", "time", "for", "all", "securities", "on", "IEX’s", "displayed", "limit", "order", "book", ".", "TOPS", "is", "ideal", "for", "developers", "needing", "both", "quote", "and", "trade", "data", "." ]
python
valid
35.222222
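Editorial call sketch for tops() from the record above (not part of the dataset). The ticker, token placeholder, and version string are assumptions not given in the record; a valid IEX Cloud token is required for a real request.

from pyEX.marketdata.http import tops

quotes = tops(symbols=["AAPL"], token="pk_your_token_here", version="stable")
print(quotes)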
jazzband/django-pipeline
pipeline/compressors/__init__.py
https://github.com/jazzband/django-pipeline/blob/3cd2f93bb47bf8d34447e13ff691f7027e7b07a2/pipeline/compressors/__init__.py#L162-L174
def embeddable(self, path, variant): """Is the asset embeddable ?""" name, ext = os.path.splitext(path) font = ext in FONT_EXTS if not variant: return False if not (re.search(settings.EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)): return False if ext not in EMBED_EXTS: return False if not (font or len(self.encoded_content(path)) < settings.EMBED_MAX_IMAGE_SIZE): return False return True
[ "def", "embeddable", "(", "self", ",", "path", ",", "variant", ")", ":", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "font", "=", "ext", "in", "FONT_EXTS", "if", "not", "variant", ":", "return", "False", "if", "not", "(", "re", ".", "search", "(", "settings", ".", "EMBED_PATH", ",", "path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ")", "and", "self", ".", "storage", ".", "exists", "(", "path", ")", ")", ":", "return", "False", "if", "ext", "not", "in", "EMBED_EXTS", ":", "return", "False", "if", "not", "(", "font", "or", "len", "(", "self", ".", "encoded_content", "(", "path", ")", ")", "<", "settings", ".", "EMBED_MAX_IMAGE_SIZE", ")", ":", "return", "False", "return", "True" ]
Is the asset embeddable ?
[ "Is", "the", "asset", "embeddable", "?" ]
python
train
39.307692
marcharper/python-ternary
ternary/ternary_axes_subplot.py
https://github.com/marcharper/python-ternary/blob/a4bef393ec9df130d4b55707293c750498a01843/ternary/ternary_axes_subplot.py#L215-L236
def left_corner_label(self, label, position=None, rotation=0, offset=0.08, **kwargs): """ Sets the label on the left corner (complements right axis.) Parameters ---------- label: string The axis label position: 3-Tuple of floats, None The position of the text label rotation: float, 0 The angle of rotation of the label offset: float, Used to compute the distance of the label from the axis kwargs: Any kwargs to pass through to matplotlib. """ if not position: position = (-offset / 2, offset / 2, 0) self._corner_labels["left"] = (label, position, rotation, kwargs)
[ "def", "left_corner_label", "(", "self", ",", "label", ",", "position", "=", "None", ",", "rotation", "=", "0", ",", "offset", "=", "0.08", ",", "*", "*", "kwargs", ")", ":", "if", "not", "position", ":", "position", "=", "(", "-", "offset", "/", "2", ",", "offset", "/", "2", ",", "0", ")", "self", ".", "_corner_labels", "[", "\"left\"", "]", "=", "(", "label", ",", "position", ",", "rotation", ",", "kwargs", ")" ]
Sets the label on the left corner (complements right axis.) Parameters ---------- label: string The axis label position: 3-Tuple of floats, None The position of the text label rotation: float, 0 The angle of rotation of the label offset: float, Used to compute the distance of the label from the axis kwargs: Any kwargs to pass through to matplotlib.
[ "Sets", "the", "label", "on", "the", "left", "corner", "(", "complements", "right", "axis", ".", ")" ]
python
train
33.727273
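Editorial usage sketch for left_corner_label() from the record above (not in the dataset). It assumes the usual python-ternary entry point ternary.figure(), which returns a matplotlib Figure and a TernaryAxesSubplot, and that keyword arguments such as fontsize pass straight through to matplotlib.

import ternary

figure, tax = ternary.figure(scale=1.0)
tax.boundary()
tax.left_corner_label("Component A", fontsize=12)
tax.right_corner_label("Component B", fontsize=12)
tax.top_corner_label("Component C", fontsize=12)
figure.savefig("simplex.png")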
GNS3/gns3-server
gns3server/compute/dynamips/nodes/router.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/router.py#L1049-L1057
def get_slot_bindings(self): """ Returns slot bindings. :returns: slot bindings (adapter names) list """ slot_bindings = yield from self._hypervisor.send('vm slot_bindings "{}"'.format(self._name)) return slot_bindings
[ "def", "get_slot_bindings", "(", "self", ")", ":", "slot_bindings", "=", "yield", "from", "self", ".", "_hypervisor", ".", "send", "(", "'vm slot_bindings \"{}\"'", ".", "format", "(", "self", ".", "_name", ")", ")", "return", "slot_bindings" ]
Returns slot bindings. :returns: slot bindings (adapter names) list
[ "Returns", "slot", "bindings", "." ]
python
train
28.888889
fedora-infra/fedora-messaging
fedora_messaging/api.py
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/api.py#L66-L121
def twisted_consume(callback, bindings=None, queues=None): """ Start a consumer using the provided callback and run it using the Twisted event loop (reactor). .. note:: Callbacks run in a Twisted-managed thread pool using the :func:`twisted.internet.threads.deferToThread` API to avoid them blocking the event loop. If you wish to use Twisted APIs in your callback you must use the :func:`twisted.internet.threads.blockingCallFromThread` or :class:`twisted.internet.interfaces.IReactorFromThreads` APIs. This API expects the caller to start the reactor. Args: callback (callable): A callable object that accepts one positional argument, a :class:`.Message` or a class object that implements the ``__call__`` method. The class will be instantiated before use. bindings (dict or list of dict): Bindings to declare before consuming. This should be the same format as the :ref:`conf-bindings` configuration. queues (dict): The queue to declare and consume from. Each key in this dictionary should be a queue name to declare, and each value should be a dictionary with the "durable", "auto_delete", "exclusive", and "arguments" keys. Returns: twisted.internet.defer.Deferred: A deferred that fires with the list of one or more :class:`.Consumer` objects. Each consumer object has a :attr:`.Consumer.result` instance variable that is a Deferred that fires or errors when the consumer halts. Note that this API is meant to survive network problems, so consuming will continue until :meth:`.Consumer.cancel` is called or a fatal server error occurs. The deferred returned by this function may error back with a :class:`fedora_messaging.exceptions.BadDeclaration` if queues or bindings cannot be declared on the broker, a :class:`fedora_messaging.exceptions.PermissionException` if the user doesn't have access to the queue, or :class:`fedora_messaging.exceptions.ConnectionException` if the TLS or AMQP handshake fails. """ if isinstance(bindings, dict): bindings = [bindings] callback = _check_callback(callback) global _twisted_service if _twisted_service is None: _twisted_service = service.FedoraMessagingServiceV2(config.conf["amqp_url"]) reactor.callWhenRunning(_twisted_service.startService) # Twisted is killing the underlying connection before stopService gets # called, so we need to add it as a pre-shutdown event to gracefully # finish up messages in progress. reactor.addSystemEventTrigger( "before", "shutdown", _twisted_service.stopService ) return _twisted_service._service.factory.consume(callback, bindings, queues)
[ "def", "twisted_consume", "(", "callback", ",", "bindings", "=", "None", ",", "queues", "=", "None", ")", ":", "if", "isinstance", "(", "bindings", ",", "dict", ")", ":", "bindings", "=", "[", "bindings", "]", "callback", "=", "_check_callback", "(", "callback", ")", "global", "_twisted_service", "if", "_twisted_service", "is", "None", ":", "_twisted_service", "=", "service", ".", "FedoraMessagingServiceV2", "(", "config", ".", "conf", "[", "\"amqp_url\"", "]", ")", "reactor", ".", "callWhenRunning", "(", "_twisted_service", ".", "startService", ")", "# Twisted is killing the underlying connection before stopService gets", "# called, so we need to add it as a pre-shutdown event to gracefully", "# finish up messages in progress.", "reactor", ".", "addSystemEventTrigger", "(", "\"before\"", ",", "\"shutdown\"", ",", "_twisted_service", ".", "stopService", ")", "return", "_twisted_service", ".", "_service", ".", "factory", ".", "consume", "(", "callback", ",", "bindings", ",", "queues", ")" ]
Start a consumer using the provided callback and run it using the Twisted event loop (reactor). .. note:: Callbacks run in a Twisted-managed thread pool using the :func:`twisted.internet.threads.deferToThread` API to avoid them blocking the event loop. If you wish to use Twisted APIs in your callback you must use the :func:`twisted.internet.threads.blockingCallFromThread` or :class:`twisted.internet.interfaces.IReactorFromThreads` APIs. This API expects the caller to start the reactor. Args: callback (callable): A callable object that accepts one positional argument, a :class:`.Message` or a class object that implements the ``__call__`` method. The class will be instantiated before use. bindings (dict or list of dict): Bindings to declare before consuming. This should be the same format as the :ref:`conf-bindings` configuration. queues (dict): The queue to declare and consume from. Each key in this dictionary should be a queue name to declare, and each value should be a dictionary with the "durable", "auto_delete", "exclusive", and "arguments" keys. Returns: twisted.internet.defer.Deferred: A deferred that fires with the list of one or more :class:`.Consumer` objects. Each consumer object has a :attr:`.Consumer.result` instance variable that is a Deferred that fires or errors when the consumer halts. Note that this API is meant to survive network problems, so consuming will continue until :meth:`.Consumer.cancel` is called or a fatal server error occurs. The deferred returned by this function may error back with a :class:`fedora_messaging.exceptions.BadDeclaration` if queues or bindings cannot be declared on the broker, a :class:`fedora_messaging.exceptions.PermissionException` if the user doesn't have access to the queue, or :class:`fedora_messaging.exceptions.ConnectionException` if the TLS or AMQP handshake fails.
[ "Start", "a", "consumer", "using", "the", "provided", "callback", "and", "run", "it", "using", "the", "Twisted", "event", "loop", "(", "reactor", ")", "." ]
python
train
51.714286
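Editorial consumer sketch built from the twisted_consume() record above (not part of the dataset). The callback body, queue name, and binding are hypothetical, though the queue-dictionary keys follow the record's own docstring; as that docstring notes, the caller is responsible for starting the reactor.

from twisted.internet import reactor
from fedora_messaging import api

def on_message(message):
    # Runs in a Twisted-managed thread pool, so plain blocking code is fine here.
    print("received on topic:", message.topic)

queues = {
    "demo_queue": {
        "durable": False,
        "auto_delete": True,
        "exclusive": False,
        "arguments": {},
    },
}
bindings = [{"exchange": "amq.topic", "queue": "demo_queue", "routing_keys": ["#"]}]

api.twisted_consume(on_message, bindings=bindings, queues=queues)
reactor.run()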