Dataset schema (one row per function record):

    repo              string (7 to 54 chars)
    path              string (4 to 192 chars)
    url               string (87 to 284 chars)
    code              string (78 to 104k chars)
    code_tokens       list
    docstring         string (1 to 46.9k chars)
    docstring_tokens  list
    language          string (1 distinct value)
    partition         string (3 distinct values)
ns1/ns1-python
ns1/ipam.py
https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/ipam.py#L179-L221
def load(self, callback=None, errback=None, reload=False):
    """
    Load address data from the API.
    """
    if not reload and self.data:
        raise AddressException('Address already loaded')

    def success(result, *args):
        self.data = result
        self.id = result['id']
        self.prefix = result['prefix']
        self.type = result['type']
        self.network = Network(self.config, id=result['network_id'])
        # self.scope_group = Scopegroup(config=self.config, id=result['scope_group_id']) NYI

        if self.type != 'host':
            self.report = self._rest.report(self.id)
            children = self._rest.retrieve_children(self.id)
            self.children = [Address(self.config, id=child['id'])
                             for child in children if len(children) > 0]

        try:
            parent = self._rest.retrieve_parent(self.id)
            self.parent = Address(self.config, id=parent['id'])
        except ResourceException:
            pass

        if callback:
            return callback(self)
        else:
            return self

    if self.id is None:
        if self.prefix is None or self.type is None or self.network is None:
            raise AddressException('Must at least specify an id or prefix, type and network')
        else:
            network_id = self.network.id
            try:
                self.id = [address for address in self._rest.list()
                           if address['prefix'] == self.prefix
                           and address['type'] == self.type
                           and address['network_id'] == network_id][0]['id']
            except IndexError:
                raise AddressException("Could not find address by prefix. It may not exist, or is a child address. "
                                       "Use the topmost parent prefix or specify ID")

    return self._rest.retrieve(self.id, callback=success, errback=errback)
[ "def", "load", "(", "self", ",", "callback", "=", "None", ",", "errback", "=", "None", ",", "reload", "=", "False", ")", ":", "if", "not", "reload", "and", "self", ".", "data", ":", "raise", "AddressException", "(", "'Address already loaded'", ")", "def", "success", "(", "result", ",", "*", "args", ")", ":", "self", ".", "data", "=", "result", "self", ".", "id", "=", "result", "[", "'id'", "]", "self", ".", "prefix", "=", "result", "[", "'prefix'", "]", "self", ".", "type", "=", "result", "[", "'type'", "]", "self", ".", "network", "=", "Network", "(", "self", ".", "config", ",", "id", "=", "result", "[", "'network_id'", "]", ")", "# self.scope_group = Scopegroup(config=self.config, id=result['scope_group_id']) NYI", "if", "self", ".", "type", "!=", "'host'", ":", "self", ".", "report", "=", "self", ".", "_rest", ".", "report", "(", "self", ".", "id", ")", "children", "=", "self", ".", "_rest", ".", "retrieve_children", "(", "self", ".", "id", ")", "self", ".", "children", "=", "[", "Address", "(", "self", ".", "config", ",", "id", "=", "child", "[", "'id'", "]", ")", "for", "child", "in", "children", "if", "len", "(", "children", ")", ">", "0", "]", "try", ":", "parent", "=", "self", ".", "_rest", ".", "retrieve_parent", "(", "self", ".", "id", ")", "self", ".", "parent", "=", "Address", "(", "self", ".", "config", ",", "id", "=", "parent", "[", "'id'", "]", ")", "except", "ResourceException", ":", "pass", "if", "callback", ":", "return", "callback", "(", "self", ")", "else", ":", "return", "self", "if", "self", ".", "id", "is", "None", ":", "if", "self", ".", "prefix", "is", "None", "or", "self", ".", "type", "is", "None", "or", "self", ".", "network", "is", "None", ":", "raise", "AddressException", "(", "'Must at least specify an id or prefix, type and network'", ")", "else", ":", "network_id", "=", "self", ".", "network", ".", "id", "try", ":", "self", ".", "id", "=", "[", "address", "for", "address", "in", "self", ".", "_rest", ".", "list", "(", ")", "if", "address", "[", "'prefix'", "]", "==", "self", ".", "prefix", "and", "address", "[", "'type'", "]", "==", "self", ".", "type", "and", "address", "[", "'network_id'", "]", "==", "network_id", "]", "[", "0", "]", "[", "'id'", "]", "except", "IndexError", ":", "raise", "AddressException", "(", "\"Could not find address by prefix. It may not exist, or is a child address. \"", "\"Use the topmost parent prefix or specify ID\"", ")", "return", "self", ".", "_rest", ".", "retrieve", "(", "self", ".", "id", ",", "callback", "=", "success", ",", "errback", "=", "errback", ")" ]
Load address data from the API.
[ "Load", "address", "data", "from", "the", "API", "." ]
python
train
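A minimal usage sketch for Address.load() above, assuming the standard ns1-python entry point; the API key, network id and the Address/Network constructor keywords are assumptions inferred from the method body:

    from ns1 import NS1
    from ns1.ipam import Address, Network

    config = NS1(apiKey='<YOUR-API-KEY>').config   # placeholder key

    network = Network(config, id=1)                # assumed existing network id
    addr = Address(config, prefix='192.0.2.0/24',  # documentation prefix
                   type='planned',                 # assumed address type
                   network=network)
    addr = addr.load()   # resolves the id by prefix/type/network, then retrieves
    print(addr.id, addr.prefix, addr.type)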
mdiener/grace
grace/py27/pyjsdoc.py
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/pyjsdoc.py#L525-L536
def translate_links(self, text, in_comment=None):
    """
    Turn all @link tags in `text` into HTML anchor tags.

    `in_comment` is the `CommentDoc` that contains the text, for
    relative method lookups.
    """
    def replace_link(matchobj):
        ref = matchobj.group(1)
        return '<a href = "%s">%s</a>' % (
            self.translate_ref_to_url(ref, in_comment), ref)
    return re.sub('{@link ([\w#]+)}', replace_link, text)
[ "def", "translate_links", "(", "self", ",", "text", ",", "in_comment", "=", "None", ")", ":", "def", "replace_link", "(", "matchobj", ")", ":", "ref", "=", "matchobj", ".", "group", "(", "1", ")", "return", "'<a href = \"%s\">%s</a>'", "%", "(", "self", ".", "translate_ref_to_url", "(", "ref", ",", "in_comment", ")", ",", "ref", ")", "return", "re", ".", "sub", "(", "'{@link ([\\w#]+)}'", ",", "replace_link", ",", "text", ")" ]
Turn all @link tags in `text` into HTML anchor tags. `in_comment` is the `CommentDoc` that contains the text, for relative method lookups.
[ "Turn", "all", "@link", "tags", "in", "text", "into", "HTML", "anchor", "tags", "." ]
python
train
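Since translate_ref_to_url() needs the surrounding documentation model, here is a self-contained sketch of the same @link substitution with that lookup stubbed out by a hypothetical URL scheme:

    import re

    def translate_ref_to_url(ref, in_comment=None):
        # Hypothetical stand-in: map "Class#method" to "Class.html#method"
        return ref.replace('#', '.html#') if '#' in ref else ref + '.html'

    def translate_links(text, in_comment=None):
        def replace_link(matchobj):
            ref = matchobj.group(1)
            return '<a href = "%s">%s</a>' % (
                translate_ref_to_url(ref, in_comment), ref)
        return re.sub(r'{@link ([\w#]+)}', replace_link, text)

    print(translate_links('See {@link FileDoc#to_json} for details.'))
    # See <a href = "FileDoc.html#to_json">FileDoc#to_json</a> for details.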
bwohlberg/sporco
sporco/admm/rpca.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/rpca.py#L191-L199
def obfn_fvar(self):
    """Variable to be evaluated in computing regularisation term,
    depending on 'fEvalX' option value.
    """
    if self.opt['fEvalX']:
        return self.X
    else:
        return self.cnst_c() - self.cnst_B(self.Y)
[ "def", "obfn_fvar", "(", "self", ")", ":", "if", "self", ".", "opt", "[", "'fEvalX'", "]", ":", "return", "self", ".", "X", "else", ":", "return", "self", ".", "cnst_c", "(", ")", "-", "self", ".", "cnst_B", "(", "self", ".", "Y", ")" ]
Variable to be evaluated in computing regularisation term, depending on 'fEvalX' option value.
[ "Variable", "to", "be", "evaluated", "in", "computing", "regularisation", "term", "depending", "on", "fEvalX", "option", "value", "." ]
python
train
OzymandiasTheGreat/python-libinput
libinput/device.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1814-L1832
def size(self):
    """The physical size of a device in mm, where meaningful.

    This property is only valid on devices with the required data, i.e.
    tablets, touchpads and touchscreens. For other devices this property
    raises :exc:`AssertionError`.

    Returns:
        (float, float): (Width, Height) in mm.
    Raises:
        AssertionError
    """
    width = c_double(0)
    height = c_double(0)
    rc = self._libinput.libinput_device_get_size(
        self._handle, byref(width), byref(height))
    assert rc == 0, 'This device does not provide size information'
    return width.value, height.value
[ "def", "size", "(", "self", ")", ":", "width", "=", "c_double", "(", "0", ")", "height", "=", "c_double", "(", "0", ")", "rc", "=", "self", ".", "_libinput", ".", "libinput_device_get_size", "(", "self", ".", "_handle", ",", "byref", "(", "width", ")", ",", "byref", "(", "height", ")", ")", "assert", "rc", "==", "0", ",", "'This device does not provide size information'", "return", "width", ".", "value", ",", "height", ".", "value" ]
The physical size of a device in mm, where meaningful. This property is only valid on devices with the required data, i.e. tablets, touchpads and touchscreens. For other devices this property raises :exc:`AssertionError`. Returns: (float, float): (Width, Height) in mm. Raises: AssertionError
[ "The", "physical", "size", "of", "a", "device", "in", "mm", "where", "meaningful", "." ]
python
train
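A hedged sketch of querying the size property; the context construction (udev backend, seat assignment) and the event iteration follow python-libinput's README of that era and are assumptions:

    from libinput import LibInput

    li = LibInput(udev=True)          # assumed udev-backed context
    li.udev_assign_seat('seat0')

    for event in li.get_event():
        device = event.device
        try:
            width_mm, height_mm = device.size
            print(device.name, width_mm, height_mm)
        except AssertionError:
            # size is meaningless for e.g. plain mice and keyboards
            print(device.name, 'has no size information')
        break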
Microsoft/nni
tools/nni_annotation/__init__.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_annotation/__init__.py#L77-L107
def expand_annotations(src_dir, dst_dir):
    """Expand annotations in user code.
    Return dst_dir if annotation detected; return src_dir if not.

    src_dir: directory path of user code (str)
    dst_dir: directory to place generated files (str)
    """
    if src_dir[-1] == slash:
        src_dir = src_dir[:-1]
    if dst_dir[-1] == slash:
        dst_dir = dst_dir[:-1]

    annotated = False

    for src_subdir, dirs, files in os.walk(src_dir):
        assert src_subdir.startswith(src_dir)
        dst_subdir = src_subdir.replace(src_dir, dst_dir, 1)
        os.makedirs(dst_subdir, exist_ok=True)
        for file_name in files:
            src_path = os.path.join(src_subdir, file_name)
            dst_path = os.path.join(dst_subdir, file_name)
            if file_name.endswith('.py'):
                annotated |= _expand_file_annotations(src_path, dst_path)
            else:
                shutil.copyfile(src_path, dst_path)
        for dir_name in dirs:
            os.makedirs(os.path.join(dst_subdir, dir_name), exist_ok=True)

    return dst_dir if annotated else src_dir
[ "def", "expand_annotations", "(", "src_dir", ",", "dst_dir", ")", ":", "if", "src_dir", "[", "-", "1", "]", "==", "slash", ":", "src_dir", "=", "src_dir", "[", ":", "-", "1", "]", "if", "dst_dir", "[", "-", "1", "]", "==", "slash", ":", "dst_dir", "=", "dst_dir", "[", ":", "-", "1", "]", "annotated", "=", "False", "for", "src_subdir", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "src_dir", ")", ":", "assert", "src_subdir", ".", "startswith", "(", "src_dir", ")", "dst_subdir", "=", "src_subdir", ".", "replace", "(", "src_dir", ",", "dst_dir", ",", "1", ")", "os", ".", "makedirs", "(", "dst_subdir", ",", "exist_ok", "=", "True", ")", "for", "file_name", "in", "files", ":", "src_path", "=", "os", ".", "path", ".", "join", "(", "src_subdir", ",", "file_name", ")", "dst_path", "=", "os", ".", "path", ".", "join", "(", "dst_subdir", ",", "file_name", ")", "if", "file_name", ".", "endswith", "(", "'.py'", ")", ":", "annotated", "|=", "_expand_file_annotations", "(", "src_path", ",", "dst_path", ")", "else", ":", "shutil", ".", "copyfile", "(", "src_path", ",", "dst_path", ")", "for", "dir_name", "in", "dirs", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "dst_subdir", ",", "dir_name", ")", ",", "exist_ok", "=", "True", ")", "return", "dst_dir", "if", "annotated", "else", "src_dir" ]
Expand annotations in user code. Return dst_dir if annotation detected; return src_dir if not. src_dir: directory path of user code (str) dst_dir: directory to place generated files (str)
[ "Expand", "annotations", "in", "user", "code", ".", "Return", "dst_dir", "if", "annotation", "detected", ";", "return", "src_dir", "if", "not", ".", "src_dir", ":", "directory", "path", "of", "user", "code", "(", "str", ")", "dst_dir", ":", "directory", "to", "place", "generated", "files", "(", "str", ")" ]
python
train
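A short usage sketch; the import path is an assumption based on the file location tools/nni_annotation/__init__.py, and the source directory is a placeholder:

    import tempfile
    from nni_annotation import expand_annotations   # assumed import path

    src = 'my_trial_code'        # directory containing @nni.* annotations
    dst = tempfile.mkdtemp()
    out_dir = expand_annotations(src, dst)
    # out_dir == dst if any .py file contained annotations, else src
    print('run trials from:', out_dir)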
mitsei/dlkit
dlkit/records/assessment/qti/extended_text_interaction.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/qti/extended_text_interaction.py#L106-L111
def set_max_strings(self, max_strings):
    """stub"""
    if not self.my_osid_object_form._is_valid_integer(
            max_strings, self.get_max_strings_metadata()):
        raise InvalidArgument('maxStrings')
    self.my_osid_object_form._my_map['maxStrings'] = max_strings
[ "def", "set_max_strings", "(", "self", ",", "max_strings", ")", ":", "if", "not", "self", ".", "my_osid_object_form", ".", "_is_valid_integer", "(", "max_strings", ",", "self", ".", "get_max_strings_metadata", "(", ")", ")", ":", "raise", "InvalidArgument", "(", "'maxStrings'", ")", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'maxStrings'", "]", "=", "max_strings" ]
stub
[ "stub" ]
python
train
nerdvegas/rez
src/rez/backport/zipfile.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/backport/zipfile.py#L979-L1000
def _writecheck(self, zinfo):
    """Check for errors before writing a file to the archive."""
    if zinfo.filename in self.NameToInfo:
        if self.debug:      # Warning for duplicate names
            print "Duplicate name:", zinfo.filename
    if self.mode not in ("w", "a"):
        raise RuntimeError, 'write() requires mode "w" or "a"'
    if not self.fp:
        raise RuntimeError, \
              "Attempt to write ZIP archive that was already closed"
    if zinfo.compress_type == ZIP_DEFLATED and not zlib:
        raise RuntimeError, \
              "Compression requires the (missing) zlib module"
    if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
        raise RuntimeError, \
              "That compression method is not supported"
    if zinfo.file_size > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Filesize would require ZIP64 extensions")
    if zinfo.header_offset > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Zipfile size would require ZIP64 extensions")
[ "def", "_writecheck", "(", "self", ",", "zinfo", ")", ":", "if", "zinfo", ".", "filename", "in", "self", ".", "NameToInfo", ":", "if", "self", ".", "debug", ":", "# Warning for duplicate names", "print", "\"Duplicate name:\"", ",", "zinfo", ".", "filename", "if", "self", ".", "mode", "not", "in", "(", "\"w\"", ",", "\"a\"", ")", ":", "raise", "RuntimeError", ",", "'write() requires mode \"w\" or \"a\"'", "if", "not", "self", ".", "fp", ":", "raise", "RuntimeError", ",", "\"Attempt to write ZIP archive that was already closed\"", "if", "zinfo", ".", "compress_type", "==", "ZIP_DEFLATED", "and", "not", "zlib", ":", "raise", "RuntimeError", ",", "\"Compression requires the (missing) zlib module\"", "if", "zinfo", ".", "compress_type", "not", "in", "(", "ZIP_STORED", ",", "ZIP_DEFLATED", ")", ":", "raise", "RuntimeError", ",", "\"That compression method is not supported\"", "if", "zinfo", ".", "file_size", ">", "ZIP64_LIMIT", ":", "if", "not", "self", ".", "_allowZip64", ":", "raise", "LargeZipFile", "(", "\"Filesize would require ZIP64 extensions\"", ")", "if", "zinfo", ".", "header_offset", ">", "ZIP64_LIMIT", ":", "if", "not", "self", ".", "_allowZip64", ":", "raise", "LargeZipFile", "(", "\"Zipfile size would require ZIP64 extensions\"", ")" ]
Check for errors before writing a file to the archive.
[ "Check", "for", "errors", "before", "writing", "a", "file", "to", "the", "archive", "." ]
python
train
proycon/flat
flat/views.py
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/views.py#L123-L177
def initdoc(request, namespace, docid, mode, template, context=None, configuration=None):
    """Initialise a document (not invoked directly)"""
    perspective = request.GET.get('perspective', 'document')
    if context is None:
        context = {}
    if 'configuration' in request.session:
        configuration = request.session['configuration']
    elif configuration is None:
        return fatalerror(request, "No configuration specified")
    if configuration not in settings.CONFIGURATIONS:
        return fatalerror(request, "Specified configuration does not exist")
    flatargs = {
        'setdefinitions': True,
        'declarations': True,  #implies provenance as well
        'metadata': True,
        'toc': True,
        'slices': request.GET.get('slices', settings.CONFIGURATIONS[configuration].get('slices', 'p:25,s:100')),  #overriden either by configuration or by user
        'customslicesize': 0,  #disabled for initial probe
        'textclasses': True,
    }
    error = False
    try:
        doc = flat.comm.query(request, "USE " + namespace + "/" + docid + " PROBE", **flatargs)  #retrieves only the meta information, not document content
        context.update(getcontext(request, namespace, docid, doc, mode, configuration))
    except Exception as e:
        context.update(docserveerror(e))
        error = True
    if not error:
        dorequiredeclaration = 'requiredeclaration' in settings.CONFIGURATIONS[configuration] and settings.CONFIGURATIONS[configuration]['requiredeclaration']
        if dorequiredeclaration:
            if not 'declarations' in doc:
                return fatalerror(request, "Refusing to load document, missing expected declarations, none declared")
            declarations = doc['declarations']
            for annotationtype, annotationset in settings.CONFIGURATIONS[configuration]['requiredeclaration']:
                found = False
                for d in declarations:
                    if annotationtype == d['annotationtype'] and (not annotationset or annotationset == d['set']):
                        found = True
                        break
                if not found:
                    if annotationset:
                        return fatalerror(request, "Refusing to load document, missing expected declaration for annotation type " + annotationtype + "/" + annotationset)
                    else:
                        return fatalerror(request, "Refusing to load document, missing expected declaration for annotation type " + annotationtype)
        dometadataindex = 'metadataindex' in settings.CONFIGURATIONS[configuration] and settings.CONFIGURATIONS[configuration]['metadataindex']
        if dometadataindex:
            metadata = json.loads(context['metadata'])
            for metakey in settings.CONFIGURATIONS[configuration]['metadataindex']:
                if metakey in metadata:
                    MetadataIndex.objects.update_or_create(namespace=namespace, docid=docid, key=metakey, defaults={'value': metadata[metakey]})
    response = render(request, template, context)
    if 'fatalerror' in context:
        response.status_code = 500
    return response
[ "def", "initdoc", "(", "request", ",", "namespace", ",", "docid", ",", "mode", ",", "template", ",", "context", "=", "None", ",", "configuration", "=", "None", ")", ":", "perspective", "=", "request", ".", "GET", ".", "get", "(", "'perspective'", ",", "'document'", ")", "if", "context", "is", "None", ":", "context", "=", "{", "}", "if", "'configuration'", "in", "request", ".", "session", ":", "configuration", "=", "request", ".", "session", "[", "'configuration'", "]", "elif", "configuration", "is", "None", ":", "return", "fatalerror", "(", "request", ",", "\"No configuration specified\"", ")", "if", "configuration", "not", "in", "settings", ".", "CONFIGURATIONS", ":", "return", "fatalerror", "(", "request", ",", "\"Specified configuration does not exist\"", ")", "flatargs", "=", "{", "'setdefinitions'", ":", "True", ",", "'declarations'", ":", "True", ",", "#implies provenance as well", "'metadata'", ":", "True", ",", "'toc'", ":", "True", ",", "'slices'", ":", "request", ".", "GET", ".", "get", "(", "'slices'", ",", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", ".", "get", "(", "'slices'", ",", "'p:25,s:100'", ")", ")", ",", "#overriden either by configuration or by user", "'customslicesize'", ":", "0", ",", "#disabled for initial probe", "'textclasses'", ":", "True", ",", "}", "error", "=", "False", "try", ":", "doc", "=", "flat", ".", "comm", ".", "query", "(", "request", ",", "\"USE \"", "+", "namespace", "+", "\"/\"", "+", "docid", "+", "\" PROBE\"", ",", "*", "*", "flatargs", ")", "#retrieves only the meta information, not document content", "context", ".", "update", "(", "getcontext", "(", "request", ",", "namespace", ",", "docid", ",", "doc", ",", "mode", ",", "configuration", ")", ")", "except", "Exception", "as", "e", ":", "context", ".", "update", "(", "docserveerror", "(", "e", ")", ")", "error", "=", "True", "if", "not", "error", ":", "dorequiredeclaration", "=", "'requiredeclaration'", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "and", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'requiredeclaration'", "]", "if", "dorequiredeclaration", ":", "if", "not", "'declarations'", "in", "doc", ":", "return", "fatalerror", "(", "request", ",", "\"Refusing to load document, missing expected declarations, none declared\"", ")", "declarations", "=", "doc", "[", "'declarations'", "]", "for", "annotationtype", ",", "annotationset", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'requiredeclaration'", "]", ":", "found", "=", "False", "for", "d", "in", "declarations", ":", "if", "annotationtype", "==", "d", "[", "'annotationtype'", "]", "and", "(", "not", "annotationset", "or", "annotationset", "==", "d", "[", "'set'", "]", ")", ":", "found", "=", "True", "break", "if", "not", "found", ":", "if", "annotationset", ":", "return", "fatalerror", "(", "request", ",", "\"Refusing to load document, missing expected declaration for annotation type \"", "+", "annotationtype", "+", "\"/\"", "+", "annotationset", ")", "else", ":", "return", "fatalerror", "(", "request", ",", "\"Refusing to load document, missing expected declaration for annotation type \"", "+", "annotationtype", ")", "dometadataindex", "=", "'metadataindex'", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "and", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'metadataindex'", "]", "if", "dometadataindex", ":", "metadata", "=", "json", ".", "loads", "(", "context", "[", "'metadata'", "]", ")", "for", 
"metakey", "in", "settings", ".", "CONFIGURATIONS", "[", "configuration", "]", "[", "'metadataindex'", "]", ":", "if", "metakey", "in", "metadata", ":", "MetadataIndex", ".", "objects", ".", "update_or_create", "(", "namespace", "=", "namespace", ",", "docid", "=", "docid", ",", "key", "=", "metakey", ",", "defaults", "=", "{", "'value'", ":", "metadata", "[", "metakey", "]", "}", ")", "response", "=", "render", "(", "request", ",", "template", ",", "context", ")", "if", "'fatalerror'", "in", "context", ":", "response", ".", "status_code", "=", "500", "return", "response" ]
Initialise a document (not invoked directly)
[ "Initialise", "a", "document", "(", "not", "invoked", "directly", ")" ]
python
train
expfactory/expfactory
expfactory/database/filesystem.py
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/database/filesystem.py#L56-L66
def generate_subid(self, token=None):
    '''assumes a flat (file system) database, organized by
    experiment id, and subject id, with data (json) organized
    by subject identifier
    '''
    # Not headless auto-increments
    if not token:
        token = str(uuid.uuid4())
    # Headless doesn't use any folder_id, just generated token folder
    return "%s/%s" % (self.study_id, token)
[ "def", "generate_subid", "(", "self", ",", "token", "=", "None", ")", ":", "# Not headless auto-increments", "if", "not", "token", ":", "token", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "# Headless doesn't use any folder_id, just generated token folder", "return", "\"%s/%s\"", "%", "(", "self", ".", "study_id", ",", "token", ")" ]
assumes a flat (file system) database, organized by experiment id, and subject id, with data (json) organized by subject identifier
[ "assumes", "a", "flat", "(", "file", "system", ")", "database", "organized", "by", "experiment", "id", "and", "subject", "id", "with", "data", "(", "json", ")", "organized", "by", "subject", "identifier" ]
python
train
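Because the method only joins study_id with a token (or a fresh UUID4), its behavior can be shown with a tiny self-contained stand-in; FakeFilesystemDB is a hypothetical class, not the expfactory one:

    import uuid

    class FakeFilesystemDB:
        study_id = 'expfactory'

        def generate_subid(self, token=None):
            if not token:
                token = str(uuid.uuid4())
            return "%s/%s" % (self.study_id, token)

    db = FakeFilesystemDB()
    print(db.generate_subid('sub-001'))   # expfactory/sub-001
    print(db.generate_subid())            # expfactory/<random uuid4>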
s1s5/django_busybody
django_busybody/views.py
https://github.com/s1s5/django_busybody/blob/5c6fd89824224f1de1be79ccd9a149f025af1b8f/django_busybody/views.py#L155-L161
def form_valid(self, forms):
    """
    If the form is valid, save the associated model.
    """
    for key, form in forms.items():
        setattr(self, '{}_object'.format(key), form.save())
    return super(MultipleModelFormMixin, self).form_valid(forms)
[ "def", "form_valid", "(", "self", ",", "forms", ")", ":", "for", "key", ",", "form", "in", "forms", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "'{}_object'", ".", "format", "(", "key", ")", ",", "form", ".", "save", "(", ")", ")", "return", "super", "(", "MultipleModelFormMixin", ",", "self", ")", ".", "form_valid", "(", "forms", ")" ]
If the form is valid, save the associated model.
[ "If", "the", "form", "is", "valid", "save", "the", "associated", "model", "." ]
python
train
twisted/axiom
axiom/store.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/store.py#L1731-L1779
def batchInsert(self, itemType, itemAttributes, dataRows):
    """
    Create multiple items in the store without loading
    corresponding Python objects into memory.

    The items' C{stored} callback will not be called.

    Example::

        myData = [(37, u"Fred", u"Wichita"),
                  (28, u"Jim", u"Fresno"),
                  (43, u"Betty", u"Dubuque")]
        myStore.batchInsert(FooItem,
                            [FooItem.age, FooItem.name, FooItem.city],
                            myData)

    @param itemType: an Item subclass to create instances of.
    @param itemAttributes: an iterable of attributes on the Item subclass.
    @param dataRows: an iterable of iterables, each the same length as
        C{itemAttributes} and containing data corresponding to each
        attribute in it.

    @return: None.
    """
    class FakeItem:
        pass
    _NEEDS_DEFAULT = object()  # token for lookup failure
    fakeOSelf = FakeItem()
    fakeOSelf.store = self
    sql = itemType._baseInsertSQL(self)
    indices = {}
    schema = [attr for (name, attr) in itemType.getSchema()]
    for i, attr in enumerate(itemAttributes):
        indices[attr] = i
    for row in dataRows:
        oid = self.store.executeSchemaSQL(
            _schema.CREATE_OBJECT, [self.store.getTypeID(itemType)])
        insertArgs = [oid]
        for attr in schema:
            i = indices.get(attr, _NEEDS_DEFAULT)
            if i is _NEEDS_DEFAULT:
                pyval = attr.default
            else:
                pyval = row[i]
            dbval = attr._convertPyval(fakeOSelf, pyval)
            insertArgs.append(dbval)
        self.executeSQL(sql, insertArgs)
[ "def", "batchInsert", "(", "self", ",", "itemType", ",", "itemAttributes", ",", "dataRows", ")", ":", "class", "FakeItem", ":", "pass", "_NEEDS_DEFAULT", "=", "object", "(", ")", "# token for lookup failure", "fakeOSelf", "=", "FakeItem", "(", ")", "fakeOSelf", ".", "store", "=", "self", "sql", "=", "itemType", ".", "_baseInsertSQL", "(", "self", ")", "indices", "=", "{", "}", "schema", "=", "[", "attr", "for", "(", "name", ",", "attr", ")", "in", "itemType", ".", "getSchema", "(", ")", "]", "for", "i", ",", "attr", "in", "enumerate", "(", "itemAttributes", ")", ":", "indices", "[", "attr", "]", "=", "i", "for", "row", "in", "dataRows", ":", "oid", "=", "self", ".", "store", ".", "executeSchemaSQL", "(", "_schema", ".", "CREATE_OBJECT", ",", "[", "self", ".", "store", ".", "getTypeID", "(", "itemType", ")", "]", ")", "insertArgs", "=", "[", "oid", "]", "for", "attr", "in", "schema", ":", "i", "=", "indices", ".", "get", "(", "attr", ",", "_NEEDS_DEFAULT", ")", "if", "i", "is", "_NEEDS_DEFAULT", ":", "pyval", "=", "attr", ".", "default", "else", ":", "pyval", "=", "row", "[", "i", "]", "dbval", "=", "attr", ".", "_convertPyval", "(", "fakeOSelf", ",", "pyval", ")", "insertArgs", ".", "append", "(", "dbval", ")", "self", ".", "executeSQL", "(", "sql", ",", "insertArgs", ")" ]
Create multiple items in the store without loading corresponding Python objects into memory. The items' C{stored} callback will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None.
[ "Create", "multiple", "items", "in", "the", "store", "without", "loading", "corresponding", "Python", "objects", "into", "memory", "." ]
python
train
sorgerlab/indra
rest_api/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L187-L196
def biopax_process_pc_pathsfromto():
    """Process PathwayCommons paths from-to genes, return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    source = body.get('source')
    target = body.get('target')
    bp = biopax.process_pc_pathsfromto(source, target)
    return _stmts_from_proc(bp)
[ "def", "biopax_process_pc_pathsfromto", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "source", "=", "body", ".", "get", "(", "'source'", ")", "target", "=", "body", ".", "get", "(", "'target'", ")", "bp", "=", "biopax", ".", "process_pc_pathsfromto", "(", "source", ",", "target", ")", "return", "_stmts_from_proc", "(", "bp", ")" ]
Process PathwayCommons paths from-to genes, return INDRA Statements.
[ "Process", "PathwayCommons", "paths", "from", "-", "to", "genes", "return", "INDRA", "Statements", "." ]
python
train
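A hedged client-side sketch of calling this endpoint; the host and route path are illustrative assumptions, not the service's documented URL:

    import requests

    payload = {'source': 'BRAF', 'target': 'MAP2K1'}
    resp = requests.post('http://localhost:8080/biopax/pathsfromto',  # assumed route
                         json=payload)
    stmts_json = resp.json()   # JSON-serialized INDRA Statements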
pip-services3-python/pip-services3-commons-python
pip_services3_commons/data/AnyValueArray.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueArray.py#L248-L259
def get_as_float_with_default(self, index, default_value):
    """
    Converts array element into a float or returns default value if conversion is not possible.

    :param index: an index of element to get.

    :param default_value: the default value

    :return: float value of the element or default value if conversion is not supported.
    """
    value = self[index]
    return FloatConverter.to_float_with_default(value, default_value)
[ "def", "get_as_float_with_default", "(", "self", ",", "index", ",", "default_value", ")", ":", "value", "=", "self", "[", "index", "]", "return", "FloatConverter", ".", "to_float_with_default", "(", "value", ",", "default_value", ")" ]
Converts array element into a float or returns default value if conversion is not possible. :param index: an index of element to get. :param default_value: the default value :return: float value of the element or default value if conversion is not supported.
[ "Converts", "array", "element", "into", "a", "float", "or", "returns", "default", "value", "if", "conversion", "is", "not", "possible", "." ]
python
train
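Usage sketch; the top-level import and the list-taking constructor are assumptions based on the package layout:

    from pip_services3_commons.data import AnyValueArray   # assumed re-export

    arr = AnyValueArray([1, '2.5', 'not-a-number'])
    print(arr.get_as_float_with_default(0, 0.0))   # 1.0
    print(arr.get_as_float_with_default(1, 0.0))   # 2.5
    print(arr.get_as_float_with_default(2, 0.0))   # 0.0 (conversion failed)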
sundarnagarajan/py25519
setupext/__init__.py
https://github.com/sundarnagarajan/py25519/blob/3756331d1b6ff1f797b8f23567c4ac490bd07680/setupext/__init__.py#L116-L184
def run_in_order(l, show_output=True, show_err=True,
                 ignore_err=False, args=(), **kwargs):
    '''
    Processes each element of l in order:
        if it is a string: execute it as a shell command
        elif it is a callable, call it with *args, **kwargs

    l-->list: Each elem is either a string (shell command) or callable
        Any other type is ignored
    show_output-->boolean: Show stdout of shell commands
        Does not affect callables
    show_err-->Boolean: Show stderr of shell commands
        Does not affect callables
    ignore_err-->boolean: Continue after exception or shell command
        with return code != 0
    Returns-->Nothing

    if ignore_err == False, exceptions are re-raised, hence shown

    ------------------------------------------------------------------
    show_output  show_err  ignore_err   stdout  stderr  exception  continue
                                                        trace
    ------------------------------------------------------------------
    True         True      False        SHOW    SHOW    SHOW       NO
    True         False     False        SHOW    HIDE    SHOW       NO
    False        True      False        HIDE    SHOW    SHOW       NO
    False        False     False        HIDE    HIDE    SHOW       NO
    True         True      True         SHOW    SHOW    SHOW       YES
    True         False     True         SHOW    HIDE    HIDE       YES
    False        True      True         HIDE    SHOW    SHOW       YES
    False        False     True         HIDE    HIDE    HIDE       YES
    ------------------------------------------------------------------
    DEFAULT                             SHOW    SHOW    SHOW       NO
    ------------------------------------------------------------------
    '''
    # Set defaults
    if show_output is None:
        show_output = True
    if show_err is None:
        show_err = True
    if ignore_err is None:
        ignore_err = False
    if args is None:
        args = ()

    for c in l:
        try:
            if isinstance(c, str):
                devnull = open(os.devnull, 'w')
                if not show_err:
                    stderr = devnull
                else:
                    stderr = None
                if not show_output:
                    stdout = devnull
                else:
                    stdout = None
                retcode = subprocess.call(
                    c, shell=True, stdout=stdout, stderr=stderr)
                if not ignore_err and retcode != 0:
                    break
            elif hasattr(c, '__call__'):
                c(*args, **kwargs)
        except:
            if not ignore_err:
                raise
            if show_err:
                sys.stderr.write(traceback.format_exc())
[ "def", "run_in_order", "(", "l", ",", "show_output", "=", "True", ",", "show_err", "=", "True", ",", "ignore_err", "=", "False", ",", "args", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# Set defaults", "if", "show_output", "is", "None", ":", "show_output", "=", "True", "if", "show_err", "is", "None", ":", "show_err", "=", "True", "if", "ignore_err", "is", "None", ":", "ignore_err", "=", "False", "if", "args", "is", "None", ":", "args", "=", "(", ")", "for", "c", "in", "l", ":", "try", ":", "if", "isinstance", "(", "c", ",", "str", ")", ":", "devnull", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "if", "not", "show_err", ":", "stderr", "=", "devnull", "else", ":", "stderr", "=", "None", "if", "not", "show_output", ":", "stdout", "=", "devnull", "else", ":", "stdout", "=", "None", "retcode", "=", "subprocess", ".", "call", "(", "c", ",", "shell", "=", "True", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ")", "if", "not", "ignore_err", "and", "retcode", "!=", "0", ":", "break", "elif", "hasattr", "(", "c", ",", "'__call__'", ")", ":", "c", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", ":", "if", "not", "ignore_err", ":", "raise", "if", "show_err", ":", "sys", ".", "stderr", ".", "write", "(", "traceback", ".", "format_exc", "(", ")", ")" ]
Processes each element of l in order: if it is a string: execute it as a shell command elif it is a callable, call it with *args, **kwargs l-->list: Each elem is either a string (shell command) or callable Any other type is ignored show_output-->boolean: Show stdout of shell commands Does not affect callables show_err-->Boolean: Show stderr of shell commands Does not affect callables ignore_err-->boolean: Continue after exception or shell command with return code != 0 Returns-->Nothing if ignore_err == False, exceptions are re-raised, hence shown ------------------------------------------------------------------ show_output show_err ignore_err stdout stderr exception continue trace ------------------------------------------------------------------ True True False SHOW SHOW SHOW NO True False False SHOW HIDE SHOW NO False True False HIDE SHOW SHOW NO False False False HIDE HIDE SHOW NO True True True SHOW SHOW SHOW YES True False True SHOW HIDE HIDE YES False True True HIDE SHOW SHOW YES False False True HIDE HIDE HIDE YES ------------------------------------------------------------------ ----------- DEFAULT ----------- SHOW SHOW SHOW NO ------------------------------------------------------------------
[ "Processes", "each", "element", "of", "l", "in", "order", ":", "if", "it", "is", "a", "string", ":", "execute", "it", "as", "a", "shell", "command", "elif", "it", "is", "a", "callable", "call", "it", "with", "*", "args", "**", "kwargs" ]
python
train
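Usage sketch mixing shell commands and a callable; with the default ignore_err=False the loop stops at the first non-zero return code:

    def announce():
        print('build finished')

    run_in_order([
        'echo building...',   # shell command, stdout shown by default
        announce,             # callable, invoked with *args/**kwargs
        'false',              # non-zero return code -> loop breaks here
        'echo never reached',
    ])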
davgeo/clear
clear/database.py
https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/database.py#L184-L194
def _PurgeTable(self, tableName):
    """
    Deletes all rows from given table without dropping table.

    Parameters
    ----------
    tableName : string
        Name of table.
    """
    goodlogging.Log.Info("DB", "Deleting all entries from table {0}".format(tableName),
                         verbosity=self.logVerbosity)
    self._ActionDatabase("DELETE FROM {0}".format(tableName))
[ "def", "_PurgeTable", "(", "self", ",", "tableName", ")", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"DB\"", ",", "\"Deleting all entries from table {0}\"", ".", "format", "(", "tableName", ")", ",", "verbosity", "=", "self", ".", "logVerbosity", ")", "self", ".", "_ActionDatabase", "(", "\"DELETE FROM {0}\"", ".", "format", "(", "tableName", ")", ")" ]
Deletes all rows from given table without dropping table. Parameters ---------- tableName : string Name of table.
[ "Deletes", "all", "rows", "from", "given", "table", "without", "dropping", "table", "." ]
python
train
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L2746-L2755
def verify_database(trusted_consensus_hash, consensus_block_height, untrusted_working_dir,
                    trusted_working_dir, start_block=None, expected_snapshots={}):
    """
    Verify that a database is consistent with a known-good consensus hash.
    Return True if valid.
    Return False if not
    """
    db = BlockstackDB.get_readwrite_instance(trusted_working_dir)
    consensus_impl = virtualchain_hooks
    return virtualchain.state_engine_verify(trusted_consensus_hash, consensus_block_height,
                                            consensus_impl, untrusted_working_dir, db,
                                            start_block=start_block,
                                            expected_snapshots=expected_snapshots)
[ "def", "verify_database", "(", "trusted_consensus_hash", ",", "consensus_block_height", ",", "untrusted_working_dir", ",", "trusted_working_dir", ",", "start_block", "=", "None", ",", "expected_snapshots", "=", "{", "}", ")", ":", "db", "=", "BlockstackDB", ".", "get_readwrite_instance", "(", "trusted_working_dir", ")", "consensus_impl", "=", "virtualchain_hooks", "return", "virtualchain", ".", "state_engine_verify", "(", "trusted_consensus_hash", ",", "consensus_block_height", ",", "consensus_impl", ",", "untrusted_working_dir", ",", "db", ",", "start_block", "=", "start_block", ",", "expected_snapshots", "=", "expected_snapshots", ")" ]
Verify that a database is consistent with a known-good consensus hash. Return True if valid. Return False if not
[ "Verify", "that", "a", "database", "is", "consistent", "with", "a", "known", "-", "good", "consensus", "hash", ".", "Return", "True", "if", "valid", ".", "Return", "False", "if", "not" ]
python
train
Anaconda-Platform/anaconda-client
binstar_client/mixins/channels.py
https://github.com/Anaconda-Platform/anaconda-client/blob/b276f0572744c73c184a8b43a897cfa7fc1dc523/binstar_client/mixins/channels.py#L22-L30
def show_channel(self, channel, owner):
    '''List the channels for owner

    If owner is none, the currently logged in user is used
    '''
    url = '%s/channels/%s/%s' % (self.domain, owner, channel)
    res = self.session.get(url)
    self._check_response(res, [200])
    return res.json()
[ "def", "show_channel", "(", "self", ",", "channel", ",", "owner", ")", ":", "url", "=", "'%s/channels/%s/%s'", "%", "(", "self", ".", "domain", ",", "owner", ",", "channel", ")", "res", "=", "self", ".", "session", ".", "get", "(", "url", ")", "self", ".", "_check_response", "(", "res", ",", "[", "200", "]", ")", "return", "res", ".", "json", "(", ")" ]
List the channels for owner If owner is none, the currently logged in user is used
[ "List", "the", "channels", "for", "owner", "If", "owner", "is", "none", "the", "currently", "logged", "in", "user", "is", "used" ]
python
train
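A hedged sketch; get_server_api is anaconda-client's usual session helper, and the token and owner values are placeholders:

    from binstar_client.utils import get_server_api

    api = get_server_api(token='<TOKEN>')       # placeholder token
    info = api.show_channel('main', 'my-org')   # channel, owner
    print(info)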
grabbles/grabbit
grabbit/utils.py
https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/utils.py#L34-L37
def listify(obj, ignore=(list, tuple, type(None))):
    ''' Wraps all non-list or tuple objects in a list; provides a
    simple way to accept flexible arguments. '''
    return obj if isinstance(obj, ignore) else [obj]
[ "def", "listify", "(", "obj", ",", "ignore", "=", "(", "list", ",", "tuple", ",", "type", "(", "None", ")", ")", ")", ":", "return", "obj", "if", "isinstance", "(", "obj", ",", "ignore", ")", "else", "[", "obj", "]" ]
Wraps all non-list or tuple objects in a list; provides a simple way to accept flexible arguments.
[ "Wraps", "all", "non", "-", "list", "or", "tuple", "objects", "in", "a", "list", ";", "provides", "a", "simple", "way", "to", "accept", "flexible", "arguments", "." ]
python
train
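listify in action (the import path follows the record's file location):

    from grabbit.utils import listify

    print(listify('grabbit'))      # ['grabbit']
    print(listify(['a', 'b']))     # ['a', 'b']  (unchanged)
    print(listify(None))           # None        (ignored type, unchanged)
    print(listify(3))              # [3]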
CI-WATER/gsshapy
gsshapy/orm/snw.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/snw.py#L211-L244
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Orographic Gage File Read from File Method
    """
    # Set file extension property
    self.fileExtension = extension

    # Open file and parse into HmetRecords
    with open(path, 'r') as orthoFile:
        for line in orthoFile:
            sline = line.strip().split()

            # Cases
            if sline[0].lower() == 'num_sites:':
                self.numSites = sline[1]
            elif sline[0].lower() == 'elev_base':
                self.elevBase = sline[1]
            elif sline[0].lower() == 'elev_2':
                self.elev2 = sline[1]
            elif sline[0].lower() == 'year':
                """DO NOTHING"""
            else:
                # Create datetime object
                dateTime = datetime(year=int(sline[0]),
                                    month=int(sline[1]),
                                    day=int(sline[2]),
                                    hour=int(sline[3]))

                # Create GSSHAPY OrthoMeasurement object
                measurement = OrographicMeasurement(dateTime=dateTime,
                                                    temp2=sline[4])

                # Associate OrthoMeasurement with OrthographicGageFile
                self.orographicMeasurements.append(measurement)
[ "def", "_read", "(", "self", ",", "directory", ",", "filename", ",", "session", ",", "path", ",", "name", ",", "extension", ",", "spatial", ",", "spatialReferenceID", ",", "replaceParamFile", ")", ":", "# Set file extension property", "self", ".", "fileExtension", "=", "extension", "# Open file and parse into HmetRecords", "with", "open", "(", "path", ",", "'r'", ")", "as", "orthoFile", ":", "for", "line", "in", "orthoFile", ":", "sline", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "# Cases", "if", "sline", "[", "0", "]", ".", "lower", "(", ")", "==", "'num_sites:'", ":", "self", ".", "numSites", "=", "sline", "[", "1", "]", "elif", "sline", "[", "0", "]", ".", "lower", "(", ")", "==", "'elev_base'", ":", "self", ".", "elevBase", "=", "sline", "[", "1", "]", "elif", "sline", "[", "0", "]", ".", "lower", "(", ")", "==", "'elev_2'", ":", "self", ".", "elev2", "=", "sline", "[", "1", "]", "elif", "sline", "[", "0", "]", ".", "lower", "(", ")", "==", "'year'", ":", "\"\"\"DO NOTHING\"\"\"", "else", ":", "# Create datetime object", "dateTime", "=", "datetime", "(", "year", "=", "int", "(", "sline", "[", "0", "]", ")", ",", "month", "=", "int", "(", "sline", "[", "1", "]", ")", ",", "day", "=", "int", "(", "sline", "[", "2", "]", ")", ",", "hour", "=", "int", "(", "sline", "[", "3", "]", ")", ")", "# Create GSSHAPY OrthoMeasurement object", "measurement", "=", "OrographicMeasurement", "(", "dateTime", "=", "dateTime", ",", "temp2", "=", "sline", "[", "4", "]", ")", "# Associate OrthoMeasurement with OrthographicGageFile", "self", ".", "orographicMeasurements", ".", "append", "(", "measurement", ")" ]
Orographic Gage File Read from File Method
[ "Orographic", "Gage", "File", "Read", "from", "File", "Method" ]
python
train
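The branches above imply a simple card format; here is a hedged reconstruction of an input file this parser would accept (header keywords follow the lower-cased comparisons in the code; values are illustrative only):

    sample = """\
    NUM_SITES:  2
    ELEV_BASE   1500.0
    ELEV_2      2500.0
    YEAR MONTH DAY HOUR TEMP
    2012 01 15 00 -2.5
    2012 01 15 01 -2.7
    """
    # Each data line becomes datetime(2012, 1, 15, hour) plus a temp2 value
    # appended to self.orographicMeasurements; the YEAR header line is skipped.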
odlgroup/odl
odl/solvers/functional/default_functionals.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/default_functionals.py#L1424-L1461
def gradient(self):
    """Gradient operator of the functional.

    The gradient is not defined in points where one or more
    components are less than or equal to 0.
    """
    functional = self

    class KLCrossEntropyGradient(Operator):

        """The gradient operator of this functional."""

        def __init__(self):
            """Initialize a new instance."""
            super(KLCrossEntropyGradient, self).__init__(
                functional.domain, functional.domain, linear=False)

        def _call(self, x):
            """Apply the gradient operator to the given point.

            The gradient is not defined in for points with components less
            than or equal to zero.
            """
            if functional.prior is None:
                tmp = np.log(x)
            else:
                tmp = np.log(x / functional.prior)

            if np.all(np.isfinite(tmp)):
                return tmp
            else:
                # The derivative is not defined.
                raise ValueError('The gradient of the Kullback-Leibler '
                                 'Cross Entropy functional is not defined '
                                 'for `x` with one or more components '
                                 'less than or equal to zero.'.format(x))

    return KLCrossEntropyGradient()
[ "def", "gradient", "(", "self", ")", ":", "functional", "=", "self", "class", "KLCrossEntropyGradient", "(", "Operator", ")", ":", "\"\"\"The gradient operator of this functional.\"\"\"", "def", "__init__", "(", "self", ")", ":", "\"\"\"Initialize a new instance.\"\"\"", "super", "(", "KLCrossEntropyGradient", ",", "self", ")", ".", "__init__", "(", "functional", ".", "domain", ",", "functional", ".", "domain", ",", "linear", "=", "False", ")", "def", "_call", "(", "self", ",", "x", ")", ":", "\"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in for points with components less\n than or equal to zero.\n \"\"\"", "if", "functional", ".", "prior", "is", "None", ":", "tmp", "=", "np", ".", "log", "(", "x", ")", "else", ":", "tmp", "=", "np", ".", "log", "(", "x", "/", "functional", ".", "prior", ")", "if", "np", ".", "all", "(", "np", ".", "isfinite", "(", "tmp", ")", ")", ":", "return", "tmp", "else", ":", "# The derivative is not defined.", "raise", "ValueError", "(", "'The gradient of the Kullback-Leibler '", "'Cross Entropy functional is not defined '", "'for `x` with one or more components '", "'less than or equal to zero.'", ".", "format", "(", "x", ")", ")", "return", "KLCrossEntropyGradient", "(", ")" ]
Gradient operator of the functional. The gradient is not defined in points where one or more components are less than or equal to 0.
[ "Gradient", "operator", "of", "the", "functional", "." ]
python
train
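In formula form, the two branches of _call above compute (a sketch matching the code, with prior g):

    \nabla F(x) = \begin{cases} \log x, & \text{no prior set} \\ \log(x / g), & \text{prior } g \end{cases}

defined only where every component of x is strictly positive.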
singularityhub/singularity-python
singularity/analysis/reproduce/levels.py
https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L34-L61
def get_custom_level(regexp=None, description=None, skip_files=None, include_files=None):
    '''get_custom_level will generate a custom level for the user,
    based on a regular expression. If used outside the context of
    tarsum, the user can generate their own named and described filters.

    :param regexp: must be defined, the file filter regular expression
    :param description: optional description
    '''
    if regexp == None:
        regexp = "."
    if description is None:
        description = "This is a custom filter generated by the user."

    custom = {"description": description,
              "regexp": regexp}

    # Include extra files?
    if include_files is not None:
        if not isinstance(include_files, set):
            include_files = set(include_files)
        custom['include_files'] = include_files

    # Skip files?
    if skip_files is not None:
        if not isinstance(skip_files, set):
            skip_files = set(skip_files)
        custom['skip_files'] = skip_files

    return custom
[ "def", "get_custom_level", "(", "regexp", "=", "None", ",", "description", "=", "None", ",", "skip_files", "=", "None", ",", "include_files", "=", "None", ")", ":", "if", "regexp", "==", "None", ":", "regexp", "=", "\".\"", "if", "description", "is", "None", ":", "description", "=", "\"This is a custom filter generated by the user.\"", "custom", "=", "{", "\"description\"", ":", "description", ",", "\"regexp\"", ":", "regexp", "}", "# Include extra files?", "if", "include_files", "is", "not", "None", ":", "if", "not", "isinstance", "(", "include_files", ",", "set", ")", ":", "include_files", "=", "set", "(", "include_files", ")", "custom", "[", "'include_files'", "]", "=", "include_files", "# Skip files?", "if", "skip_files", "is", "not", "None", ":", "if", "not", "isinstance", "(", "skip_files", ",", "set", ")", ":", "skip_files", "=", "set", "(", "skip_files", ")", "custom", "[", "'skip_files'", "]", "=", "skip_files", "return", "custom" ]
get_custom_level will generate a custom level for the user, based on a regular expression. If used outside the context of tarsum, the user can generate their own named and described filters. :param regexp: must be defined, the file filter regular expression :param description: optional description
[ "get_custom_level", "will", "generate", "a", "custom", "level", "for", "the", "user", "based", "on", "a", "regular", "expression", ".", "If", "used", "outside", "the", "context", "of", "tarsum", "the", "user", "can", "generate", "their", "own", "named", "and", "described", "filters", ".", ":", "param", "regexp", ":", "must", "be", "defined", "the", "file", "filter", "regular", "expression", ":", "param", "description", ":", "optional", "description" ]
python
train
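Usage sketch: build a filter that matches everything under /etc, always including one file and skipping another (paths are illustrative):

    level = get_custom_level(regexp='^/etc',
                             description='Hash everything under /etc',
                             include_files=['/etc/passwd'],
                             skip_files=['/etc/mtab'])
    print(level['regexp'])       # ^/etc
    print(level['skip_files'])   # {'/etc/mtab'}  (lists are normalized to sets)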
ansible/ansible-runner
ansible_runner/runner_config.py
https://github.com/ansible/ansible-runner/blob/8ce485480a5d0b602428d9d64a752e06fb46cdb8/ansible_runner/runner_config.py#L290-L365
def generate_ansible_command(self):
    """
    Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs,
    this method will generate the ``ansible`` or ``ansible-playbook`` command that will be
    used by the :py:class:`ansible_runner.runner.Runner` object to start the process
    """
    if self.binary is not None:
        base_command = self.binary
        self.execution_mode = ExecutionMode.RAW
    elif self.module is not None:
        base_command = 'ansible'
        self.execution_mode = ExecutionMode.ANSIBLE
    else:
        base_command = 'ansible-playbook'
        self.execution_mode = ExecutionMode.ANSIBLE_PLAYBOOK

    exec_list = [base_command]

    try:
        cmdline_args = self.loader.load_file('env/cmdline', string_types, encoding=None)
        args = shlex.split(cmdline_args)
        exec_list.extend(args)
    except ConfigurationError:
        pass

    if isinstance(self.inventory, list):
        for i in self.inventory:
            exec_list.append("-i")
            exec_list.append(i)
    else:
        exec_list.append("-i")
        exec_list.append(self.inventory)

    if self.limit is not None:
        exec_list.append("--limit")
        exec_list.append(self.limit)

    if self.loader.isfile('env/extravars'):
        exec_list.extend(['-e', '@{}'.format(self.loader.abspath('env/extravars'))])

    if isinstance(self.extra_vars, dict) and self.extra_vars:
        exec_list.extend(
            [
                '-e',
                '%s' % ' '.join(
                    ["{}=\"{}\"".format(k, self.extra_vars[k]) for k in self.extra_vars]
                )
            ]
        )

    if self.verbosity:
        v = 'v' * self.verbosity
        exec_list.append('-{}'.format(v))

    if self.tags:
        exec_list.extend(['--tags', '{}'.format(self.tags)])

    if self.skip_tags:
        exec_list.extend(['--skip-tags', '{}'.format(self.skip_tags)])

    if self.forks:
        exec_list.extend(['--forks', '{}'.format(self.forks)])

    # Other parameters
    if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
        exec_list.append(self.playbook)
    elif self.execution_mode == ExecutionMode.ANSIBLE:
        exec_list.append("-m")
        exec_list.append(self.module)
        if self.module_args is not None:
            exec_list.append("-a")
            exec_list.append(self.module_args)
        if self.host_pattern is not None:
            exec_list.append(self.host_pattern)

    return exec_list
[ "def", "generate_ansible_command", "(", "self", ")", ":", "if", "self", ".", "binary", "is", "not", "None", ":", "base_command", "=", "self", ".", "binary", "self", ".", "execution_mode", "=", "ExecutionMode", ".", "RAW", "elif", "self", ".", "module", "is", "not", "None", ":", "base_command", "=", "'ansible'", "self", ".", "execution_mode", "=", "ExecutionMode", ".", "ANSIBLE", "else", ":", "base_command", "=", "'ansible-playbook'", "self", ".", "execution_mode", "=", "ExecutionMode", ".", "ANSIBLE_PLAYBOOK", "exec_list", "=", "[", "base_command", "]", "try", ":", "cmdline_args", "=", "self", ".", "loader", ".", "load_file", "(", "'env/cmdline'", ",", "string_types", ",", "encoding", "=", "None", ")", "args", "=", "shlex", ".", "split", "(", "cmdline_args", ")", "exec_list", ".", "extend", "(", "args", ")", "except", "ConfigurationError", ":", "pass", "if", "isinstance", "(", "self", ".", "inventory", ",", "list", ")", ":", "for", "i", "in", "self", ".", "inventory", ":", "exec_list", ".", "append", "(", "\"-i\"", ")", "exec_list", ".", "append", "(", "i", ")", "else", ":", "exec_list", ".", "append", "(", "\"-i\"", ")", "exec_list", ".", "append", "(", "self", ".", "inventory", ")", "if", "self", ".", "limit", "is", "not", "None", ":", "exec_list", ".", "append", "(", "\"--limit\"", ")", "exec_list", ".", "append", "(", "self", ".", "limit", ")", "if", "self", ".", "loader", ".", "isfile", "(", "'env/extravars'", ")", ":", "exec_list", ".", "extend", "(", "[", "'-e'", ",", "'@{}'", ".", "format", "(", "self", ".", "loader", ".", "abspath", "(", "'env/extravars'", ")", ")", "]", ")", "if", "isinstance", "(", "self", ".", "extra_vars", ",", "dict", ")", "and", "self", ".", "extra_vars", ":", "exec_list", ".", "extend", "(", "[", "'-e'", ",", "'%s'", "%", "' '", ".", "join", "(", "[", "\"{}=\\\"{}\\\"\"", ".", "format", "(", "k", ",", "self", ".", "extra_vars", "[", "k", "]", ")", "for", "k", "in", "self", ".", "extra_vars", "]", ")", "]", ")", "if", "self", ".", "verbosity", ":", "v", "=", "'v'", "*", "self", ".", "verbosity", "exec_list", ".", "append", "(", "'-{}'", ".", "format", "(", "v", ")", ")", "if", "self", ".", "tags", ":", "exec_list", ".", "extend", "(", "[", "'--tags'", ",", "'{}'", ".", "format", "(", "self", ".", "tags", ")", "]", ")", "if", "self", ".", "skip_tags", ":", "exec_list", ".", "extend", "(", "[", "'--skip-tags'", ",", "'{}'", ".", "format", "(", "self", ".", "skip_tags", ")", "]", ")", "if", "self", ".", "forks", ":", "exec_list", ".", "extend", "(", "[", "'--forks'", ",", "'{}'", ".", "format", "(", "self", ".", "forks", ")", "]", ")", "# Other parameters", "if", "self", ".", "execution_mode", "==", "ExecutionMode", ".", "ANSIBLE_PLAYBOOK", ":", "exec_list", ".", "append", "(", "self", ".", "playbook", ")", "elif", "self", ".", "execution_mode", "==", "ExecutionMode", ".", "ANSIBLE", ":", "exec_list", ".", "append", "(", "\"-m\"", ")", "exec_list", ".", "append", "(", "self", ".", "module", ")", "if", "self", ".", "module_args", "is", "not", "None", ":", "exec_list", ".", "append", "(", "\"-a\"", ")", "exec_list", ".", "append", "(", "self", ".", "module_args", ")", "if", "self", ".", "host_pattern", "is", "not", "None", ":", "exec_list", ".", "append", "(", "self", ".", "host_pattern", ")", "return", "exec_list" ]
Given that the ``RunnerConfig`` preparation methods have been run to gather the inputs this method will generate the ``ansible`` or ``ansible-playbook`` command that will be used by the :py:class:`ansible_runner.runner.Runner` object to start the process
[ "Given", "that", "the", "RunnerConfig", "preparation", "methods", "have", "been", "run", "to", "gather", "the", "inputs", "this", "method", "will", "generate", "the", "ansible", "or", "ansible", "-", "playbook", "command", "that", "will", "be", "used", "by", "the", ":", "py", ":", "class", ":", "ansible_runner", ".", "runner", ".", "Runner", "object", "to", "start", "the", "process" ]
python
train
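Rather than calling generate_ansible_command() directly, a hedged sketch via ansible-runner's public entry point, whose config preparation ends up producing the command list above (the private data dir and playbook name are placeholders):

    import ansible_runner

    r = ansible_runner.run(private_data_dir='/tmp/demo',   # placeholder dir
                           playbook='site.yml',
                           limit='webservers',
                           verbosity=2)
    print(r.status, r.rc)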
scivision/pymap3d
pymap3d/sidereal.py
https://github.com/scivision/pymap3d/blob/c9cf676594611cdb52ff7e0eca6388c80ed4f63f/pymap3d/sidereal.py#L58-L97
def juliandate(time: datetime) -> float:
    """
    Python datetime to Julian time

    from D.Vallado Fundamentals of Astrodynamics and Applications p.187
    and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61

    Parameters
    ----------
    time : datetime.datetime
        time to convert

    Results
    -------
    jd : float
        Julian date
    """
    times = np.atleast_1d(time)
    assert times.ndim == 1

    jd = np.empty(times.size)
    for i, t in enumerate(times):
        if t.month < 3:
            year = t.year - 1
            month = t.month + 12
        else:
            year = t.year
            month = t.month

        A = int(year / 100.0)
        B = 2 - A + int(A / 4.)
        C = ((t.second / 60. + t.minute) / 60. + t.hour) / 24.

        jd[i] = (int(365.25 * (year + 4716)) +
                 int(30.6001 * (month + 1)) + t.day + B - 1524.5 + C)

    return jd.squeeze()
[ "def", "juliandate", "(", "time", ":", "datetime", ")", "->", "float", ":", "times", "=", "np", ".", "atleast_1d", "(", "time", ")", "assert", "times", ".", "ndim", "==", "1", "jd", "=", "np", ".", "empty", "(", "times", ".", "size", ")", "for", "i", ",", "t", "in", "enumerate", "(", "times", ")", ":", "if", "t", ".", "month", "<", "3", ":", "year", "=", "t", ".", "year", "-", "1", "month", "=", "t", ".", "month", "+", "12", "else", ":", "year", "=", "t", ".", "year", "month", "=", "t", ".", "month", "A", "=", "int", "(", "year", "/", "100.0", ")", "B", "=", "2", "-", "A", "+", "int", "(", "A", "/", "4.", ")", "C", "=", "(", "(", "t", ".", "second", "/", "60.", "+", "t", ".", "minute", ")", "/", "60.", "+", "t", ".", "hour", ")", "/", "24.", "jd", "[", "i", "]", "=", "(", "int", "(", "365.25", "*", "(", "year", "+", "4716", ")", ")", "+", "int", "(", "30.6001", "*", "(", "month", "+", "1", ")", ")", "+", "t", ".", "day", "+", "B", "-", "1524.5", "+", "C", ")", "return", "jd", ".", "squeeze", "(", ")" ]
Python datetime to Julian time from D.Vallado Fundamentals of Astrodynamics and Applications p.187 and J. Meeus Astronomical Algorithms 1991 Eqn. 7.1 pg. 61 Parameters ---------- time : datetime.datetime time to convert Results ------- jd : float Julian date
[ "Python", "datetime", "to", "Julian", "time" ]
python
train
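A sanity check against a known value: noon UTC on 2000-01-01 is the J2000 epoch, Julian date 2451545.0 exactly:

    from datetime import datetime
    from pymap3d.sidereal import juliandate

    print(juliandate(datetime(2000, 1, 1, 12)))   # 2451545.0
    print(juliandate(datetime(1999, 12, 31, 0)))  # 2451543.5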
cltl/KafNafParserPy
KafNafParserPy/header_data.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/header_data.py#L372-L381
def set_endTimestamp(self, etimestamp=None):
    """
    Set the end timestamp of the linguistic processor, set to None for the current time
    @type etimestamp: string
    @param etimestamp: the end timestamp (None for the current time)
    """
    if etimestamp is None:
        import time
        etimestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')
    self.node.set('endTimestamp', etimestamp)
[ "def", "set_endTimestamp", "(", "self", ",", "etimestamp", "=", "None", ")", ":", "if", "etimestamp", "is", "None", ":", "import", "time", "etimestamp", "=", "time", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S%Z'", ")", "self", ".", "node", ".", "set", "(", "'endTimestamp'", ",", "etimestamp", ")" ]
Set the end timestamp of the linguistic processor, set to None for the current time @type etimestamp: string @param etimestamp: the end timestamp (None for the current time)
[ "Set", "the", "end", "timestamp", "of", "the", "linguistic", "processor", "set", "to", "None", "for", "the", "current", "time" ]
python
train
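One detail worth noting in the set_endTimestamp default branch: %Z expands to a timezone name rather than a numeric offset, so the stamp is only ISO-8601-like:

    import time

    stamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')
    # e.g. '2024-03-01T10:15:42UTC'; '%z' would give a '+0000' style offset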
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L1511-L1526
def iter(self, start=0, count=1000): """ @start: #int cursor start position @stop: #int cursor stop position @count: #int buffer limit -> yields all of the items in the list """ cursor = '0' _loads = self._loads stop = start + count while cursor: cursor = self._client.lrange(self.key_prefix, start, stop) for x in cursor or []: yield _loads(x) start += (count + 1) stop += (count + 1)
[ "def", "iter", "(", "self", ",", "start", "=", "0", ",", "count", "=", "1000", ")", ":", "cursor", "=", "'0'", "_loads", "=", "self", ".", "_loads", "stop", "=", "start", "+", "count", "while", "cursor", ":", "cursor", "=", "self", ".", "_client", ".", "lrange", "(", "self", ".", "key_prefix", ",", "start", ",", "stop", ")", "for", "x", "in", "cursor", "or", "[", "]", ":", "yield", "_loads", "(", "x", ")", "start", "+=", "(", "count", "+", "1", ")", "stop", "+=", "(", "count", "+", "1", ")" ]
@start: #int cursor start position @stop: #int cursor stop position @count: #int buffer limit -> yields all of the items in the list
[ "@start", ":", "#int", "cursor", "start", "position", "@stop", ":", "#int", "cursor", "stop", "position", "@count", ":", "#int", "buffer", "limit" ]
python
train
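The iter record above pages through a Redis list with inclusive LRANGE windows: lrange(key, start, start+count) returns count+1 items, so both cursors step by count+1 (the docstring's @stop is the derived window edge, not a parameter). The same windowing over a plain Python list, as a standalone sketch:

    def paged(items, start=0, count=1000):
        # emulate inclusive LRANGE windows [start, stop], stepping by count+1
        stop = start + count
        while True:
            window = items[start:stop + 1]  # Python slices exclude the end
            if not window:
                break
            for x in window:
                yield x
            start += count + 1
            stop += count + 1

    assert list(paged([1, 2, 3, 4, 5], count=2)) == [1, 2, 3, 4, 5]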
tomnor/channelpack
channelpack/pack.py
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pack.py#L431-L472
def add_condition(self, conkey, cond): """Add a condition, one of the addable ones. conkey: str One of 'cond', 'startcond' or 'stopcond'. 'start' or 'stop' is accepted as shorts for 'startcond' or 'stopcond'. If the conkey is given with an explicit number (like 'stopcond3') and already exists, it will be over-written, else created. When the trailing number is implicit, the first condition with a value of None is taken. If no None value is found, a new condition is added. cond: str The condition string. See ... .. note:: Updates the mask if not no_auto. .. seealso:: :meth:`~channelpack.ChannelPack.set_duration` :meth:`~channelpack.ChannelPack.set_samplerate` :meth:`~channelpack.ChannelPack.set_stopextend` :meth:`~channelpack.ChannelPack.clear_conditions` """ # Audit: if conkey == 'start' or conkey == 'stop': conkey += 'cond' if not any(conkey.startswith(addable) for addable in _ADDABLES): raise KeyError(conkey) if not self.conconf.valid_conkey(conkey): raise KeyError(conkey) self._parse_cond(cond) # Checking conkey = self.conconf.next_conkey(conkey) self.conconf.set_condition(conkey, cond) if not self.no_auto: self.make_mask()
[ "def", "add_condition", "(", "self", ",", "conkey", ",", "cond", ")", ":", "# Audit:", "if", "conkey", "==", "'start'", "or", "conkey", "==", "'stop'", ":", "conkey", "+=", "'cond'", "if", "not", "any", "(", "conkey", ".", "startswith", "(", "addable", ")", "for", "addable", "in", "_ADDABLES", ")", ":", "raise", "KeyError", "(", "conkey", ")", "if", "not", "self", ".", "conconf", ".", "valid_conkey", "(", "conkey", ")", ":", "raise", "KeyError", "(", "conkey", ")", "self", ".", "_parse_cond", "(", "cond", ")", "# Checking", "conkey", "=", "self", ".", "conconf", ".", "next_conkey", "(", "conkey", ")", "self", ".", "conconf", ".", "set_condition", "(", "conkey", ",", "cond", ")", "if", "not", "self", ".", "no_auto", ":", "self", ".", "make_mask", "(", ")" ]
Add a condition, one of the addable ones. conkey: str One of 'cond', 'startcond' or 'stopcond'. 'start' or 'stop' is accepted as shorts for 'startcond' or 'stopcond'. If the conkey is given with an explicit number (like 'stopcond3') and already exists, it will be over-written, else created. When the trailing number is implicit, the first condition with a value of None is taken. If no None value is found, a new condition is added. cond: str The condition string. See ... .. note:: Updates the mask if not no_auto. .. seealso:: :meth:`~channelpack.ChannelPack.set_duration` :meth:`~channelpack.ChannelPack.set_samplerate` :meth:`~channelpack.ChannelPack.set_stopextend` :meth:`~channelpack.ChannelPack.clear_conditions`
[ "Add", "a", "condition", "one", "of", "the", "addable", "ones", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L1493-L1526
def get_status_badge(self, project, definition, branch_name=None, stage_name=None, job_name=None, configuration=None, label=None): """GetStatusBadge. [Preview API] <p>Gets the build status for a definition, optionally scoped to a specific branch, stage, job, and configuration.</p> <p>If there are more than one, then it is required to pass in a stageName value when specifying a jobName, and the same rule then applies for both if passing a configuration parameter.</p> :param str project: Project ID or project name :param str definition: Either the definition name with optional leading folder path, or the definition id. :param str branch_name: Only consider the most recent build for this branch. :param str stage_name: Use this stage within the pipeline to render the status. :param str job_name: Use this job within a stage of the pipeline to render the status. :param str configuration: Use this job configuration to render the status :param str label: Replaces the default text on the left side of the badge. :rtype: str """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition is not None: route_values['definition'] = self._serialize.url('definition', definition, 'str') query_parameters = {} if branch_name is not None: query_parameters['branchName'] = self._serialize.query('branch_name', branch_name, 'str') if stage_name is not None: query_parameters['stageName'] = self._serialize.query('stage_name', stage_name, 'str') if job_name is not None: query_parameters['jobName'] = self._serialize.query('job_name', job_name, 'str') if configuration is not None: query_parameters['configuration'] = self._serialize.query('configuration', configuration, 'str') if label is not None: query_parameters['label'] = self._serialize.query('label', label, 'str') response = self._send(http_method='GET', location_id='07acfdce-4757-4439-b422-ddd13a2fcc10', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('str', response)
[ "def", "get_status_badge", "(", "self", ",", "project", ",", "definition", ",", "branch_name", "=", "None", ",", "stage_name", "=", "None", ",", "job_name", "=", "None", ",", "configuration", "=", "None", ",", "label", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "definition", "is", "not", "None", ":", "route_values", "[", "'definition'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'definition'", ",", "definition", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "branch_name", "is", "not", "None", ":", "query_parameters", "[", "'branchName'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'branch_name'", ",", "branch_name", ",", "'str'", ")", "if", "stage_name", "is", "not", "None", ":", "query_parameters", "[", "'stageName'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'stage_name'", ",", "stage_name", ",", "'str'", ")", "if", "job_name", "is", "not", "None", ":", "query_parameters", "[", "'jobName'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'job_name'", ",", "job_name", ",", "'str'", ")", "if", "configuration", "is", "not", "None", ":", "query_parameters", "[", "'configuration'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'configuration'", ",", "configuration", ",", "'str'", ")", "if", "label", "is", "not", "None", ":", "query_parameters", "[", "'label'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'label'", ",", "label", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'07acfdce-4757-4439-b422-ddd13a2fcc10'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'str'", ",", "response", ")" ]
GetStatusBadge. [Preview API] <p>Gets the build status for a definition, optionally scoped to a specific branch, stage, job, and configuration.</p> <p>If there are more than one, then it is required to pass in a stageName value when specifying a jobName, and the same rule then applies for both if passing a configuration parameter.</p> :param str project: Project ID or project name :param str definition: Either the definition name with optional leading folder path, or the definition id. :param str branch_name: Only consider the most recent build for this branch. :param str stage_name: Use this stage within the pipeline to render the status. :param str job_name: Use this job within a stage of the pipeline to render the status. :param str configuration: Use this job configuration to render the status :param str label: Replaces the default text on the left side of the badge. :rtype: str
[ "GetStatusBadge", ".", "[", "Preview", "API", "]", "<p", ">", "Gets", "the", "build", "status", "for", "a", "definition", "optionally", "scoped", "to", "a", "specific", "branch", "stage", "job", "and", "configuration", ".", "<", "/", "p", ">", "<p", ">", "If", "there", "are", "more", "than", "one", "then", "it", "is", "required", "to", "pass", "in", "a", "stageName", "value", "when", "specifying", "a", "jobName", "and", "the", "same", "rule", "then", "applies", "for", "both", "if", "passing", "a", "configuration", "parameter", ".", "<", "/", "p", ">", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "str", "definition", ":", "Either", "the", "definition", "name", "with", "optional", "leading", "folder", "path", "or", "the", "definition", "id", ".", ":", "param", "str", "branch_name", ":", "Only", "consider", "the", "most", "recent", "build", "for", "this", "branch", ".", ":", "param", "str", "stage_name", ":", "Use", "this", "stage", "within", "the", "pipeline", "to", "render", "the", "status", ".", ":", "param", "str", "job_name", ":", "Use", "this", "job", "within", "a", "stage", "of", "the", "pipeline", "to", "render", "the", "status", ".", ":", "param", "str", "configuration", ":", "Use", "this", "job", "configuration", "to", "render", "the", "status", ":", "param", "str", "label", ":", "Replaces", "the", "default", "text", "on", "the", "left", "side", "of", "the", "badge", ".", ":", "rtype", ":", "str" ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/js_utils.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/js_utils.py#L14-L40
def wait_for_ready_state_complete(driver, timeout=settings.EXTREME_TIMEOUT): """ The DOM (Document Object Model) has a property called "readyState". When the value of this becomes "complete", page resources are considered fully loaded (although AJAX and other loads might still be happening). This method will wait until document.readyState == "complete". """ start_ms = time.time() * 1000.0 stop_ms = start_ms + (timeout * 1000.0) for x in range(int(timeout * 10)): try: ready_state = driver.execute_script("return document.readyState") except WebDriverException: # Bug fix for: [Permission denied to access property "document"] time.sleep(0.03) return True if ready_state == u'complete': time.sleep(0.01) # Better be sure everything is done loading return True else: now_ms = time.time() * 1000.0 if now_ms >= stop_ms: break time.sleep(0.1) raise Exception( "Page elements never fully loaded after %s seconds!" % timeout)
[ "def", "wait_for_ready_state_complete", "(", "driver", ",", "timeout", "=", "settings", ".", "EXTREME_TIMEOUT", ")", ":", "start_ms", "=", "time", ".", "time", "(", ")", "*", "1000.0", "stop_ms", "=", "start_ms", "+", "(", "timeout", "*", "1000.0", ")", "for", "x", "in", "range", "(", "int", "(", "timeout", "*", "10", ")", ")", ":", "try", ":", "ready_state", "=", "driver", ".", "execute_script", "(", "\"return document.readyState\"", ")", "except", "WebDriverException", ":", "# Bug fix for: [Permission denied to access property \"document\"]", "time", ".", "sleep", "(", "0.03", ")", "return", "True", "if", "ready_state", "==", "u'complete'", ":", "time", ".", "sleep", "(", "0.01", ")", "# Better be sure everything is done loading", "return", "True", "else", ":", "now_ms", "=", "time", ".", "time", "(", ")", "*", "1000.0", "if", "now_ms", ">=", "stop_ms", ":", "break", "time", ".", "sleep", "(", "0.1", ")", "raise", "Exception", "(", "\"Page elements never fully loaded after %s seconds!\"", "%", "timeout", ")" ]
The DOM (Document Object Model) has a property called "readyState". When the value of this becomes "complete", page resources are considered fully loaded (although AJAX and other loads might still be happening). This method will wait until document.readyState == "complete".
[ "The", "DOM", "(", "Document", "Object", "Model", ")", "has", "a", "property", "called", "readyState", ".", "When", "the", "value", "of", "this", "becomes", "complete", "page", "resources", "are", "considered", "fully", "loaded", "(", "although", "AJAX", "and", "other", "loads", "might", "still", "be", "happening", ")", ".", "This", "method", "will", "wait", "until", "document", ".", "readyState", "==", "complete", "." ]
python
train
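wait_for_ready_state_complete is an instance of a generic poll-until-deadline loop. Its skeleton, stripped of the Selenium specifics (a sketch, not SeleniumBase API; check is any zero-argument callable):

    import time

    def wait_until(check, timeout=30.0, interval=0.1):
        # poll until check() is truthy or the deadline passes
        deadline = time.time() + timeout
        while time.time() < deadline:
            if check():
                return True
            time.sleep(interval)
        raise TimeoutError('condition not met within %s seconds' % timeout)

    # e.g. wait_until(lambda: driver.execute_script(
    #     'return document.readyState') == 'complete')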
EmbodiedCognition/py-c3d
c3d.py
https://github.com/EmbodiedCognition/py-c3d/blob/391493d9cb4c6b4aaeee4de2930685e3a67f5845/c3d.py#L136-L171
def read(self, handle): '''Read and parse binary header data from a file handle. This method reads exactly 512 bytes from the beginning of the given file handle. Parameters ---------- handle : file handle The given handle will be reset to 0 using `seek` and then 512 bytes will be read to initialize the attributes in this Header. The handle must be readable. Raises ------ AssertionError If the magic byte from the header is not 80 (the C3D magic value). ''' handle.seek(0) (self.parameter_block, magic, self.point_count, self.analog_count, self.first_frame, self.last_frame, self.max_gap, self.scale_factor, self.data_block, self.analog_per_frame, self.frame_rate, _, self.long_event_labels, self.label_block, _) = struct.unpack(self.BINARY_FORMAT, handle.read(512)) assert magic == 80, 'C3D magic {} != 80 !'.format(magic)
[ "def", "read", "(", "self", ",", "handle", ")", ":", "handle", ".", "seek", "(", "0", ")", "(", "self", ".", "parameter_block", ",", "magic", ",", "self", ".", "point_count", ",", "self", ".", "analog_count", ",", "self", ".", "first_frame", ",", "self", ".", "last_frame", ",", "self", ".", "max_gap", ",", "self", ".", "scale_factor", ",", "self", ".", "data_block", ",", "self", ".", "analog_per_frame", ",", "self", ".", "frame_rate", ",", "_", ",", "self", ".", "long_event_labels", ",", "self", ".", "label_block", ",", "_", ")", "=", "struct", ".", "unpack", "(", "self", ".", "BINARY_FORMAT", ",", "handle", ".", "read", "(", "512", ")", ")", "assert", "magic", "==", "80", ",", "'C3D magic {} != 80 !'", ".", "format", "(", "magic", ")" ]
Read and parse binary header data from a file handle. This method reads exactly 512 bytes from the beginning of the given file handle. Parameters ---------- handle : file handle The given handle will be reset to 0 using `seek` and then 512 bytes will be read to initialize the attributes in this Header. The handle must be readable. Raises ------ AssertionError If the magic byte from the header is not 80 (the C3D magic value).
[ "Read", "and", "parse", "binary", "header", "data", "from", "a", "file", "handle", "." ]
python
train
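The Header.read record above parses a fixed 512-byte block with one struct.unpack call. A toy version of the same pattern, with a hypothetical two-byte header (block index plus magic byte) instead of c3d's real BINARY_FORMAT:

    import io
    import struct

    HEADER_FORMAT = '<BB'  # hypothetical: parameter block index, magic value

    handle = io.BytesIO(bytes([2, 80]))
    handle.seek(0)
    parameter_block, magic = struct.unpack(HEADER_FORMAT, handle.read(2))
    assert magic == 80, 'magic {} != 80 !'.format(magic)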
jfilter/text-classification-keras
texcla/corpus.py
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/corpus.py#L25-L40
def read_pos_neg_data(path, folder, limit): """returns array with positive and negative examples""" training_pos_path = os.path.join(path, folder, 'pos') training_neg_path = os.path.join(path, folder, 'neg') X_pos = read_folder(training_pos_path) X_neg = read_folder(training_neg_path) if limit is None: X = X_pos + X_neg else: X = X_pos[:limit] + X_neg[:limit] y = [1] * int(len(X) / 2) + [0] * int(len(X) / 2) return X, y
[ "def", "read_pos_neg_data", "(", "path", ",", "folder", ",", "limit", ")", ":", "training_pos_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ",", "'pos'", ")", "training_neg_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ",", "'neg'", ")", "X_pos", "=", "read_folder", "(", "training_pos_path", ")", "X_neg", "=", "read_folder", "(", "training_neg_path", ")", "if", "limit", "is", "None", ":", "X", "=", "X_pos", "+", "X_neg", "else", ":", "X", "=", "X_pos", "[", ":", "limit", "]", "+", "X_neg", "[", ":", "limit", "]", "y", "=", "[", "1", "]", "*", "int", "(", "len", "(", "X", ")", "/", "2", ")", "+", "[", "0", "]", "*", "int", "(", "len", "(", "X", ")", "/", "2", ")", "return", "X", ",", "y" ]
returns array with positive and negative examples
[ "returns", "array", "with", "positive", "and", "negative", "examples" ]
python
train
CyberZHG/keras-word-char-embd
keras_wc_embd/word_char_embd.py
https://github.com/CyberZHG/keras-word-char-embd/blob/cca6ddff01b6264dd0d12613bb9ed308e1367b8c/keras_wc_embd/word_char_embd.py#L84-L198
def get_embedding_layer(word_dict_len, char_dict_len, max_word_len, word_embd_dim=300, char_embd_dim=30, char_hidden_dim=150, char_hidden_layer_type='lstm', word_embd_weights=None, char_embd_weights=None, word_embd_trainable=None, char_embd_trainable=None, word_mask_zero=True, char_mask_zero=True): """Get the merged embedding layer. :param word_dict_len: The number of words in the dictionary including the ones mapped to 0 or 1. :param char_dict_len: The number of characters in the dictionary including the ones mapped to 0 or 1. :param max_word_len: The maximum allowed length of word. :param word_embd_dim: The dimensions of the word embedding. :param char_embd_dim: The dimensions of the character embedding :param char_hidden_dim: The dimensions of the hidden states of RNN in one direction. :param word_embd_weights: A numpy array representing the pre-trained embeddings for words. :param char_embd_weights: A numpy array representing the pre-trained embeddings for characters. :param word_embd_trainable: Whether the word embedding layer is trainable. :param char_embd_trainable: Whether the character embedding layer is trainable. :param char_hidden_layer_type: The type of the recurrent layer, 'lstm' or 'gru'. :param word_mask_zero: Whether enable the mask for words. :param char_mask_zero: Whether enable the mask for characters. :return inputs, embd_layer: The keras layer. """ if word_embd_weights is not None: word_embd_weights = [word_embd_weights] if word_embd_trainable is None: word_embd_trainable = word_embd_weights is None if char_embd_weights is not None: char_embd_weights = [char_embd_weights] if char_embd_trainable is None: char_embd_trainable = char_embd_weights is None word_input_layer = keras.layers.Input( shape=(None,), name='Input_Word', ) char_input_layer = keras.layers.Input( shape=(None, max_word_len), name='Input_Char', ) word_embd_layer = keras.layers.Embedding( input_dim=word_dict_len, output_dim=word_embd_dim, mask_zero=word_mask_zero, weights=word_embd_weights, trainable=word_embd_trainable, name='Embedding_Word', )(word_input_layer) char_embd_layer = keras.layers.Embedding( input_dim=char_dict_len, output_dim=char_embd_dim, mask_zero=char_mask_zero, weights=char_embd_weights, trainable=char_embd_trainable, name='Embedding_Char_Pre', )(char_input_layer) if char_hidden_layer_type == 'lstm': char_hidden_layer = keras.layers.Bidirectional( keras.layers.LSTM( units=char_hidden_dim, input_shape=(max_word_len, char_dict_len), return_sequences=False, return_state=False, ), name='Bi-LSTM_Char', ) elif char_hidden_layer_type == 'gru': char_hidden_layer = keras.layers.Bidirectional( keras.layers.GRU( units=char_hidden_dim, input_shape=(max_word_len, char_dict_len), return_sequences=False, return_state=False, ), name='Bi-GRU_Char', ) elif char_hidden_layer_type == 'cnn': char_hidden_layer = [ MaskedConv1D( filters=max(1, char_hidden_dim // 5), kernel_size=3, activation='relu', ), MaskedFlatten(), keras.layers.Dense( units=char_hidden_dim, name='Dense_Char', ), ] elif isinstance(char_hidden_layer_type, list) or isinstance(char_hidden_layer_type, keras.layers.Layer): char_hidden_layer = char_hidden_layer_type else: raise NotImplementedError('Unknown character hidden layer type: %s' % char_hidden_layer_type) if not isinstance(char_hidden_layer, list): char_hidden_layer = [char_hidden_layer] for i, layer in enumerate(char_hidden_layer): if i == len(char_hidden_layer) - 1: name = 'Embedding_Char' else: name = 'Embedding_Char_Pre_%d' % (i + 1) char_embd_layer = keras.layers.TimeDistributed(layer=layer, name=name)(char_embd_layer) embd_layer = keras.layers.Concatenate( name='Embedding', )([word_embd_layer, char_embd_layer]) return [word_input_layer, char_input_layer], embd_layer
[ "def", "get_embedding_layer", "(", "word_dict_len", ",", "char_dict_len", ",", "max_word_len", ",", "word_embd_dim", "=", "300", ",", "char_embd_dim", "=", "30", ",", "char_hidden_dim", "=", "150", ",", "char_hidden_layer_type", "=", "'lstm'", ",", "word_embd_weights", "=", "None", ",", "char_embd_weights", "=", "None", ",", "word_embd_trainable", "=", "None", ",", "char_embd_trainable", "=", "None", ",", "word_mask_zero", "=", "True", ",", "char_mask_zero", "=", "True", ")", ":", "if", "word_embd_weights", "is", "not", "None", ":", "word_embd_weights", "=", "[", "word_embd_weights", "]", "if", "word_embd_trainable", "is", "None", ":", "word_embd_trainable", "=", "word_embd_weights", "is", "None", "if", "char_embd_weights", "is", "not", "None", ":", "char_embd_weights", "=", "[", "char_embd_weights", "]", "if", "char_embd_trainable", "is", "None", ":", "char_embd_trainable", "=", "char_embd_weights", "is", "None", "word_input_layer", "=", "keras", ".", "layers", ".", "Input", "(", "shape", "=", "(", "None", ",", ")", ",", "name", "=", "'Input_Word'", ",", ")", "char_input_layer", "=", "keras", ".", "layers", ".", "Input", "(", "shape", "=", "(", "None", ",", "max_word_len", ")", ",", "name", "=", "'Input_Char'", ",", ")", "word_embd_layer", "=", "keras", ".", "layers", ".", "Embedding", "(", "input_dim", "=", "word_dict_len", ",", "output_dim", "=", "word_embd_dim", ",", "mask_zero", "=", "word_mask_zero", ",", "weights", "=", "word_embd_weights", ",", "trainable", "=", "word_embd_trainable", ",", "name", "=", "'Embedding_Word'", ",", ")", "(", "word_input_layer", ")", "char_embd_layer", "=", "keras", ".", "layers", ".", "Embedding", "(", "input_dim", "=", "char_dict_len", ",", "output_dim", "=", "char_embd_dim", ",", "mask_zero", "=", "char_mask_zero", ",", "weights", "=", "char_embd_weights", ",", "trainable", "=", "char_embd_trainable", ",", "name", "=", "'Embedding_Char_Pre'", ",", ")", "(", "char_input_layer", ")", "if", "char_hidden_layer_type", "==", "'lstm'", ":", "char_hidden_layer", "=", "keras", ".", "layers", ".", "Bidirectional", "(", "keras", ".", "layers", ".", "LSTM", "(", "units", "=", "char_hidden_dim", ",", "input_shape", "=", "(", "max_word_len", ",", "char_dict_len", ")", ",", "return_sequences", "=", "False", ",", "return_state", "=", "False", ",", ")", ",", "name", "=", "'Bi-LSTM_Char'", ",", ")", "elif", "char_hidden_layer_type", "==", "'gru'", ":", "char_hidden_layer", "=", "keras", ".", "layers", ".", "Bidirectional", "(", "keras", ".", "layers", ".", "GRU", "(", "units", "=", "char_hidden_dim", ",", "input_shape", "=", "(", "max_word_len", ",", "char_dict_len", ")", ",", "return_sequences", "=", "False", ",", "return_state", "=", "False", ",", ")", ",", "name", "=", "'Bi-GRU_Char'", ",", ")", "elif", "char_hidden_layer_type", "==", "'cnn'", ":", "char_hidden_layer", "=", "[", "MaskedConv1D", "(", "filters", "=", "max", "(", "1", ",", "char_hidden_dim", "//", "5", ")", ",", "kernel_size", "=", "3", ",", "activation", "=", "'relu'", ",", ")", ",", "MaskedFlatten", "(", ")", ",", "keras", ".", "layers", ".", "Dense", "(", "units", "=", "char_hidden_dim", ",", "name", "=", "'Dense_Char'", ",", ")", ",", "]", "elif", "isinstance", "(", "char_hidden_layer_type", ",", "list", ")", "or", "isinstance", "(", "char_hidden_layer_type", ",", "keras", ".", "layers", ".", "Layer", ")", ":", "char_hidden_layer", "=", "char_hidden_layer_type", "else", ":", "raise", "NotImplementedError", "(", "'Unknown character hidden layer type: %s'", "%", "char_hidden_layer_type", ")", "if", 
"not", "isinstance", "(", "char_hidden_layer", ",", "list", ")", ":", "char_hidden_layer", "=", "[", "char_hidden_layer", "]", "for", "i", ",", "layer", "in", "enumerate", "(", "char_hidden_layer", ")", ":", "if", "i", "==", "len", "(", "char_hidden_layer", ")", "-", "1", ":", "name", "=", "'Embedding_Char'", "else", ":", "name", "=", "'Embedding_Char_Pre_%d'", "%", "(", "i", "+", "1", ")", "char_embd_layer", "=", "keras", ".", "layers", ".", "TimeDistributed", "(", "layer", "=", "layer", ",", "name", "=", "name", ")", "(", "char_embd_layer", ")", "embd_layer", "=", "keras", ".", "layers", ".", "Concatenate", "(", "name", "=", "'Embedding'", ",", ")", "(", "[", "word_embd_layer", ",", "char_embd_layer", "]", ")", "return", "[", "word_input_layer", ",", "char_input_layer", "]", ",", "embd_layer" ]
Get the merged embedding layer. :param word_dict_len: The number of words in the dictionary including the ones mapped to 0 or 1. :param char_dict_len: The number of characters in the dictionary including the ones mapped to 0 or 1. :param max_word_len: The maximum allowed length of word. :param word_embd_dim: The dimensions of the word embedding. :param char_embd_dim: The dimensions of the character embedding :param char_hidden_dim: The dimensions of the hidden states of RNN in one direction. :param word_embd_weights: A numpy array representing the pre-trained embeddings for words. :param char_embd_weights: A numpy array representing the pre-trained embeddings for characters. :param word_embd_trainable: Whether the word embedding layer is trainable. :param char_embd_trainable: Whether the character embedding layer is trainable. :param char_hidden_layer_type: The type of the recurrent layer, 'lstm' or 'gru'. :param word_mask_zero: Whether enable the mask for words. :param char_mask_zero: Whether enable the mask for characters. :return inputs, embd_layer: The keras layer.
[ "Get", "the", "merged", "embedding", "layer", "." ]
python
train
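A plausible call to the get_embedding_layer record above, assuming keras is importable and the function is in scope (toy sizes, chosen only to make the shapes concrete):

    inputs, embd = get_embedding_layer(
        word_dict_len=10000,
        char_dict_len=100,
        max_word_len=16,
        char_hidden_layer_type='lstm',
    )
    # inputs is [word_input, char_input]; embd concatenates the 300-dim word
    # embedding with a 2*150-dim bidirectional character summary per token
    model = keras.models.Model(inputs=inputs, outputs=embd)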
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/settings/settings_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/settings/settings_client.py#L122-L142
def set_entries_for_scope(self, entries, user_scope, scope_name, scope_value): """SetEntriesForScope. [Preview API] Set the specified entries for the given named scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to set the settings on (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id) """ route_values = {} if user_scope is not None: route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') if scope_name is not None: route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str') if scope_value is not None: route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str') content = self._serialize.body(entries, '{object}') self._send(http_method='PATCH', location_id='4cbaafaf-e8af-4570-98d1-79ee99c56327', version='5.0-preview.1', route_values=route_values, content=content)
[ "def", "set_entries_for_scope", "(", "self", ",", "entries", ",", "user_scope", ",", "scope_name", ",", "scope_value", ")", ":", "route_values", "=", "{", "}", "if", "user_scope", "is", "not", "None", ":", "route_values", "[", "'userScope'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'user_scope'", ",", "user_scope", ",", "'str'", ")", "if", "scope_name", "is", "not", "None", ":", "route_values", "[", "'scopeName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'scope_name'", ",", "scope_name", ",", "'str'", ")", "if", "scope_value", "is", "not", "None", ":", "route_values", "[", "'scopeValue'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'scope_value'", ",", "scope_value", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "entries", ",", "'{object}'", ")", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'4cbaafaf-e8af-4570-98d1-79ee99c56327'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")" ]
SetEntriesForScope. [Preview API] Set the specified entries for the given named scope :param {object} entries: The entries to set :param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to set the settings on (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id)
[ "SetEntriesForScope", ".", "[", "Preview", "API", "]", "Set", "the", "specified", "entries", "for", "the", "given", "named", "scope", ":", "param", "{", "object", "}", "entries", ":", "The", "entries", "to", "set", ":", "param", "str", "user_scope", ":", "User", "-", "Scope", "at", "which", "to", "set", "the", "values", ".", "Should", "be", "me", "for", "the", "current", "user", "or", "host", "for", "all", "users", ".", ":", "param", "str", "scope_name", ":", "Scope", "at", "which", "to", "set", "the", "settings", "on", "(", "e", ".", "g", ".", "project", "or", "team", ")", ":", "param", "str", "scope_value", ":", "Value", "of", "the", "scope", "(", "e", ".", "g", ".", "the", "project", "or", "team", "id", ")" ]
python
train
jobovy/galpy
galpy/orbit/RZOrbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/RZOrbit.py#L215-L259
def Jacobi(self,*args,**kwargs): """ NAME: Jacobi PURPOSE: calculate the Jacobi integral of the motion INPUT: t - (optional) time at which to get the radius OmegaP= pattern speed of rotating frame (scalar) pot= potential instance or list of such instances OUTPUT: Jacobi integral HISTORY: 2011-04-18 - Written - Bovy (NYU) """ if not 'OmegaP' in kwargs or kwargs['OmegaP'] is None: OmegaP= 1. if not 'pot' in kwargs or kwargs['pot'] is None: try: pot= self._pot except AttributeError: raise AttributeError("Integrate orbit or specify pot=") else: pot= kwargs['pot'] if isinstance(pot,list): for p in pot: if hasattr(p,'OmegaP'): OmegaP= p.OmegaP() break else: if hasattr(pot,'OmegaP'): OmegaP= pot.OmegaP() kwargs.pop('OmegaP',None) else: OmegaP= kwargs.pop('OmegaP') #Make sure you are not using physical coordinates old_physical= kwargs.get('use_physical',None) kwargs['use_physical']= False thiso= self(*args,**kwargs) out= self.E(*args,**kwargs)-OmegaP*thiso[0]*thiso[2] if not old_physical is None: kwargs['use_physical']= old_physical else: kwargs.pop('use_physical') return out
[ "def", "Jacobi", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "'OmegaP'", "in", "kwargs", "or", "kwargs", "[", "'OmegaP'", "]", "is", "None", ":", "OmegaP", "=", "1.", "if", "not", "'pot'", "in", "kwargs", "or", "kwargs", "[", "'pot'", "]", "is", "None", ":", "try", ":", "pot", "=", "self", ".", "_pot", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"Integrate orbit or specify pot=\"", ")", "else", ":", "pot", "=", "kwargs", "[", "'pot'", "]", "if", "isinstance", "(", "pot", ",", "list", ")", ":", "for", "p", "in", "pot", ":", "if", "hasattr", "(", "p", ",", "'OmegaP'", ")", ":", "OmegaP", "=", "p", ".", "OmegaP", "(", ")", "break", "else", ":", "if", "hasattr", "(", "pot", ",", "'OmegaP'", ")", ":", "OmegaP", "=", "pot", ".", "OmegaP", "(", ")", "kwargs", ".", "pop", "(", "'OmegaP'", ",", "None", ")", "else", ":", "OmegaP", "=", "kwargs", ".", "pop", "(", "'OmegaP'", ")", "#Make sure you are not using physical coordinates", "old_physical", "=", "kwargs", ".", "get", "(", "'use_physical'", ",", "None", ")", "kwargs", "[", "'use_physical'", "]", "=", "False", "thiso", "=", "self", "(", "*", "args", ",", "*", "*", "kwargs", ")", "out", "=", "self", ".", "E", "(", "*", "args", ",", "*", "*", "kwargs", ")", "-", "OmegaP", "*", "thiso", "[", "0", "]", "*", "thiso", "[", "2", "]", "if", "not", "old_physical", "is", "None", ":", "kwargs", "[", "'use_physical'", "]", "=", "old_physical", "else", ":", "kwargs", ".", "pop", "(", "'use_physical'", ")", "return", "out" ]
NAME: Jacobi PURPOSE: calculate the Jacobi integral of the motion INPUT: t - (optional) time at which to get the radius OmegaP= pattern speed of rotating frame (scalar) pot= potential instance or list of such instances OUTPUT: Jacobi integral HISTORY: 2011-04-18 - Written - Bovy (NYU)
[ "NAME", ":", "Jacobi", "PURPOSE", ":", "calculate", "the", "Jacobi", "integral", "of", "the", "motion", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "the", "radius", "OmegaP", "=", "pattern", "speed", "of", "rotating", "frame", "(", "scalar", ")", "pot", "=", "potential", "instance", "or", "list", "of", "such", "instances", "OUTPUT", ":", "Jacobi", "integral", "HISTORY", ":", "2011", "-", "04", "-", "18", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
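The value returned by the Jacobi record above is the standard Jacobi integral in a frame rotating at pattern speed \Omega_p. With thiso[0] the cylindrical radius R and thiso[2] the tangential velocity v_T (the ordering implied by the indexing in the code),

    E_J = E - \Omega_p L_z, \qquad L_z = R \, v_T

which is conserved along the orbit whenever the potential is static in the rotating frame.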
visualfabriq/bquery
bquery/benchmarks/bench_groupby.py
https://github.com/visualfabriq/bquery/blob/3702e974696e22876944a3339affad2f29e1ee06/bquery/benchmarks/bench_groupby.py#L29-L39
def ctime(message=None): "Counts the time spent in some context" global t_elapsed t_elapsed = 0.0 print('\n') t = time.time() yield if message: print(message + ": ", end='') t_elapsed = time.time() - t print(round(t_elapsed, 4), "sec")
[ "def", "ctime", "(", "message", "=", "None", ")", ":", "global", "t_elapsed", "t_elapsed", "=", "0.0", "print", "(", "'\\n'", ")", "t", "=", "time", ".", "time", "(", ")", "yield", "if", "message", ":", "print", "(", "message", "+", "\": \"", ",", "end", "=", "''", ")", "t_elapsed", "=", "time", ".", "time", "(", ")", "-", "t", "print", "(", "round", "(", "t_elapsed", ",", "4", ")", ",", "\"sec\"", ")" ]
Counts the time spent in some context
[ "Counts", "the", "time", "spent", "in", "some", "context" ]
python
train
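As written, the ctime generator needs contextlib's decorator before it can back a with block (the snippet above omits it, presumably applied at the definition site). A hedged usage sketch:

    from contextlib import contextmanager

    timed = contextmanager(ctime)

    with timed('sum of a million ints'):
        total = sum(range(1000000))
    # prints something like: sum of a million ints: 0.0312 sec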
andreasjansson/head-in-the-clouds
headintheclouds/dependencies/PyDbLite/PyDbLite_conversions.py
https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/dependencies/PyDbLite/PyDbLite_conversions.py#L28-L106
def fromCSV(csvfile,out=None,fieldnames=None,fmtparams=None,conv_func={}, empty_to_None=[]): """Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None """ import csv import time import datetime if out is None: out = os.path.splitext(csvfile)[0]+".pdl" if fieldnames is None: # read field names in the first line of CSV file reader = csv.reader(open(csvfile)) fieldnames = reader.next() reader = csv.DictReader(open(csvfile),fieldnames,fmtparams) reader.next() # skip first line db = PyDbLite.Base(out) conv_func.update({"__id__":int}) auto_id = not "__id__" in fieldnames fieldnames = [ f for f in fieldnames if not f in ("__id__") ] kw = {"mode":"override"} db.create(*fieldnames,**kw) print db.fields next_id = 0 records = {} while True: try: record = reader.next() except StopIteration: break if auto_id: record["__id__"] = next_id next_id += 1 # replace empty strings by None for field in empty_to_None: if not record[field]: record[field] = None # type conversion for field in conv_func: if not isinstance(conv_func[field],(tuple,list)): record[field] = conv_func[field](record[field]) else: # date or datetime date_class,date_fmt = conv_func[field] if not record[field]: record[field] = None else: time_tuple = time.strptime(record[field],date_fmt) if date_class is datetime.date: time_tuple = time_tuple[:3] record[field] = date_class(*time_tuple) records[record["__id__"]] = record db.records = records db.commit() print len(db) return db
[ "def", "fromCSV", "(", "csvfile", ",", "out", "=", "None", ",", "fieldnames", "=", "None", ",", "fmtparams", "=", "None", ",", "conv_func", "=", "{", "}", ",", "empty_to_None", "=", "[", "]", ")", ":", "import", "csv", "import", "time", "import", "datetime", "if", "out", "is", "None", ":", "out", "=", "os", ".", "path", ".", "splitext", "(", "csvfile", ")", "[", "0", "]", "+", "\".pdl\"", "if", "fieldnames", "is", "None", ":", "# read field names in the first line of CSV file\r", "reader", "=", "csv", ".", "reader", "(", "open", "(", "csvfile", ")", ")", "fieldnames", "=", "reader", ".", "next", "(", ")", "reader", "=", "csv", ".", "DictReader", "(", "open", "(", "csvfile", ")", ",", "fieldnames", ",", "fmtparams", ")", "reader", ".", "next", "(", ")", "# skip first line\r", "db", "=", "PyDbLite", ".", "Base", "(", "out", ")", "conv_func", ".", "update", "(", "{", "\"__id__\"", ":", "int", "}", ")", "auto_id", "=", "not", "\"__id__\"", "in", "fieldnames", "fieldnames", "=", "[", "f", "for", "f", "in", "fieldnames", "if", "not", "f", "in", "(", "\"__id__\"", ")", "]", "kw", "=", "{", "\"mode\"", ":", "\"override\"", "}", "db", ".", "create", "(", "*", "fieldnames", ",", "*", "*", "kw", ")", "print", "db", ".", "fields", "next_id", "=", "0", "records", "=", "{", "}", "while", "True", ":", "try", ":", "record", "=", "reader", ".", "next", "(", ")", "except", "StopIteration", ":", "break", "if", "auto_id", ":", "record", "[", "\"__id__\"", "]", "=", "next_id", "next_id", "+=", "1", "# replace empty strings by None\r", "for", "field", "in", "empty_to_None", ":", "if", "not", "record", "[", "field", "]", ":", "record", "[", "field", "]", "=", "None", "# type conversion\r", "for", "field", "in", "conv_func", ":", "if", "not", "isinstance", "(", "conv_func", "[", "field", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "record", "[", "field", "]", "=", "conv_func", "[", "field", "]", "(", "record", "[", "field", "]", ")", "else", ":", "# date or datetime\r", "date_class", ",", "date_fmt", "=", "conv_func", "[", "field", "]", "if", "not", "record", "[", "field", "]", ":", "record", "[", "field", "]", "=", "None", "else", ":", "time_tuple", "=", "time", ".", "strptime", "(", "record", "[", "field", "]", ",", "date_fmt", ")", "if", "date_class", "is", "datetime", ".", "date", ":", "time_tuple", "=", "time_tuple", "[", ":", "3", "]", "record", "[", "field", "]", "=", "date_class", "(", "*", "time_tuple", ")", "records", "[", "record", "[", "\"__id__\"", "]", "]", "=", "record", "db", ".", "records", "=", "records", "db", ".", "commit", "(", ")", "print", "len", "(", "db", ")", "return", "db" ]
Conversion from CSV to PyDbLite csvfile : name of the CSV file in the file system out : path for the new PyDbLite base in the file system fieldnames : list of field names. If set to None, the field names must be present in the first line of the CSV file fmtparams : the format parameters for the CSV file, as described in the csv module of the standard distribution conv_func is a dictionary mapping a field name to the function used to convert the string read in the CSV to the appropriate Python type. For instance if field "age" must be converted to an integer : conv_func["age"] = int empty_to_None is a list of the fields such that when the value read in the CSV file is the empty string, the field value is set to None
[ "Conversion", "from", "CSV", "to", "PyDbLite", "csvfile", ":", "name", "of", "the", "CSV", "file", "in", "the", "file", "system", "out", ":", "path", "for", "the", "new", "PyDbLite", "base", "in", "the", "file", "system", "fieldnames", ":", "list", "of", "field", "names", ".", "If", "set", "to", "None", "the", "field", "names", "must", "be", "present", "in", "the", "first", "line", "of", "the", "CSV", "file", "fmtparams", ":", "the", "format", "parameters", "for", "the", "CSV", "file", "as", "described", "in", "the", "csv", "module", "of", "the", "standard", "distribution", "conv_func", "is", "a", "dictionary", "mapping", "a", "field", "name", "to", "the", "function", "used", "to", "convert", "the", "string", "read", "in", "the", "CSV", "to", "the", "appropriate", "Python", "type", ".", "For", "instance", "if", "field", "age", "must", "be", "converted", "to", "an", "integer", ":", "conv_func", "[", "age", "]", "=", "int", "empty_to_None", "is", "a", "list", "of", "the", "fields", "such", "that", "when", "the", "value", "read", "in", "the", "CSV", "file", "is", "the", "empty", "string", "the", "field", "value", "is", "set", "to", "None" ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py#L334-L384
def execute_no_results(self, sock_info, generator): """Execute all operations, returning no results (w=0). """ # Cannot have both unacknowledged write and bypass document validation. if self.bypass_doc_val and sock_info.max_wire_version >= 4: raise OperationFailure("Cannot set bypass_document_validation with" " unacknowledged write concern") coll = self.collection # If ordered is True we have to send GLE or use write # commands so we can abort on the first error. write_concern = WriteConcern(w=int(self.ordered)) op_id = _randint() for run in generator: try: if run.op_type == _INSERT: coll._insert( sock_info, run.ops, self.ordered, write_concern=write_concern, op_id=op_id, bypass_doc_val=self.bypass_doc_val) elif run.op_type == _UPDATE: for operation in run.ops: doc = operation['u'] check_keys = True if doc and next(iter(doc)).startswith('$'): check_keys = False coll._update( sock_info, operation['q'], doc, operation['upsert'], check_keys, operation['multi'], write_concern=write_concern, op_id=op_id, ordered=self.ordered, bypass_doc_val=self.bypass_doc_val) else: for operation in run.ops: coll._delete(sock_info, operation['q'], not operation['limit'], write_concern, op_id, self.ordered) except OperationFailure: if self.ordered: break
[ "def", "execute_no_results", "(", "self", ",", "sock_info", ",", "generator", ")", ":", "# Cannot have both unacknowledged write and bypass document validation.", "if", "self", ".", "bypass_doc_val", "and", "sock_info", ".", "max_wire_version", ">=", "4", ":", "raise", "OperationFailure", "(", "\"Cannot set bypass_document_validation with\"", "\" unacknowledged write concern\"", ")", "coll", "=", "self", ".", "collection", "# If ordered is True we have to send GLE or use write", "# commands so we can abort on the first error.", "write_concern", "=", "WriteConcern", "(", "w", "=", "int", "(", "self", ".", "ordered", ")", ")", "op_id", "=", "_randint", "(", ")", "for", "run", "in", "generator", ":", "try", ":", "if", "run", ".", "op_type", "==", "_INSERT", ":", "coll", ".", "_insert", "(", "sock_info", ",", "run", ".", "ops", ",", "self", ".", "ordered", ",", "write_concern", "=", "write_concern", ",", "op_id", "=", "op_id", ",", "bypass_doc_val", "=", "self", ".", "bypass_doc_val", ")", "elif", "run", ".", "op_type", "==", "_UPDATE", ":", "for", "operation", "in", "run", ".", "ops", ":", "doc", "=", "operation", "[", "'u'", "]", "check_keys", "=", "True", "if", "doc", "and", "next", "(", "iter", "(", "doc", ")", ")", ".", "startswith", "(", "'$'", ")", ":", "check_keys", "=", "False", "coll", ".", "_update", "(", "sock_info", ",", "operation", "[", "'q'", "]", ",", "doc", ",", "operation", "[", "'upsert'", "]", ",", "check_keys", ",", "operation", "[", "'multi'", "]", ",", "write_concern", "=", "write_concern", ",", "op_id", "=", "op_id", ",", "ordered", "=", "self", ".", "ordered", ",", "bypass_doc_val", "=", "self", ".", "bypass_doc_val", ")", "else", ":", "for", "operation", "in", "run", ".", "ops", ":", "coll", ".", "_delete", "(", "sock_info", ",", "operation", "[", "'q'", "]", ",", "not", "operation", "[", "'limit'", "]", ",", "write_concern", ",", "op_id", ",", "self", ".", "ordered", ")", "except", "OperationFailure", ":", "if", "self", ".", "ordered", ":", "break" ]
Execute all operations, returning no results (w=0).
[ "Execute", "all", "operations", "returning", "no", "results", "(", "w", "=", "0", ")", "." ]
python
train
briancappello/flask-unchained
flask_unchained/bundles/security/services/security_utils_service.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/services/security_utils_service.py#L96-L111
def use_double_hash(self, password_hash=None): """ Return a bool indicating whether a password should be hashed twice. """ single_hash = current_app.config.SECURITY_PASSWORD_SINGLE_HASH if single_hash and self.security.password_salt: raise RuntimeError('You may not specify a salt with ' 'SECURITY_PASSWORD_SINGLE_HASH') if password_hash is None: is_plaintext = self.security.password_hash == 'plaintext' else: is_plaintext = \ self.security.pwd_context.identify(password_hash) == 'plaintext' return not (is_plaintext or single_hash)
[ "def", "use_double_hash", "(", "self", ",", "password_hash", "=", "None", ")", ":", "single_hash", "=", "current_app", ".", "config", ".", "SECURITY_PASSWORD_SINGLE_HASH", "if", "single_hash", "and", "self", ".", "security", ".", "password_salt", ":", "raise", "RuntimeError", "(", "'You may not specify a salt with '", "'SECURITY_PASSWORD_SINGLE_HASH'", ")", "if", "password_hash", "is", "None", ":", "is_plaintext", "=", "self", ".", "security", ".", "password_hash", "==", "'plaintext'", "else", ":", "is_plaintext", "=", "self", ".", "security", ".", "pwd_context", ".", "identify", "(", "password_hash", ")", "==", "'plaintext'", "return", "not", "(", "is_plaintext", "or", "single_hash", ")" ]
Return a bool indicating whether a password should be hashed twice.
[ "Return", "a", "bool", "indicating", "whether", "a", "password", "should", "be", "hashed", "twice", "." ]
python
train
ktdreyer/txbugzilla
txbugzilla/__init__.py
https://github.com/ktdreyer/txbugzilla/blob/ccfc6667ce9d696b08b468b25c813cc2b68d30d6/txbugzilla/__init__.py#L149-L157
def _parse_bugs_callback(self, value): """ Fires when we get bug information back from the XML-RPC server. param value: dict of data from XML-RPC server. The "bugs" dict element contains a list of bugs. returns: ``list`` of ``AttrDict`` """ return list(map(lambda x: self._parse_bug(x), value['bugs']))
[ "def", "_parse_bugs_callback", "(", "self", ",", "value", ")", ":", "return", "list", "(", "map", "(", "lambda", "x", ":", "self", ".", "_parse_bug", "(", "x", ")", ",", "value", "[", "'bugs'", "]", ")", ")" ]
Fires when we get bug information back from the XML-RPC server. param value: dict of data from XML-RPC server. The "bugs" dict element contains a list of bugs. returns: ``list`` of ``AttrDict``
[ "Fires", "when", "we", "get", "bug", "information", "back", "from", "the", "XML", "-", "RPC", "server", "." ]
python
train
pallets/werkzeug
src/werkzeug/http.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/http.py#L897-L915
def dump_age(age=None): """Formats the duration as a base-10 integer. :param age: should be an integer number of seconds, a :class:`datetime.timedelta` object, or, if the age is unknown, `None` (default). """ if age is None: return if isinstance(age, timedelta): # do the equivalent of Python 2.7's timedelta.total_seconds(), # but disregarding fractional seconds age = age.seconds + (age.days * 24 * 3600) age = int(age) if age < 0: raise ValueError("age cannot be negative") return str(age)
[ "def", "dump_age", "(", "age", "=", "None", ")", ":", "if", "age", "is", "None", ":", "return", "if", "isinstance", "(", "age", ",", "timedelta", ")", ":", "# do the equivalent of Python 2.7's timedelta.total_seconds(),", "# but disregarding fractional seconds", "age", "=", "age", ".", "seconds", "+", "(", "age", ".", "days", "*", "24", "*", "3600", ")", "age", "=", "int", "(", "age", ")", "if", "age", "<", "0", ":", "raise", "ValueError", "(", "\"age cannot be negative\"", ")", "return", "str", "(", "age", ")" ]
Formats the duration as a base-10 integer. :param age: should be an integer number of seconds, a :class:`datetime.timedelta` object, or, if the age is unknown, `None` (default).
[ "Formats", "the", "duration", "as", "a", "base", "-", "10", "integer", "." ]
python
train
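Two representative calls to the dump_age record above, following its docstring:

    from datetime import timedelta

    assert dump_age(3600) == '3600'
    # the timedelta branch folds days into seconds and drops fractions
    assert dump_age(timedelta(days=1, seconds=30)) == '86430'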
tanghaibao/goatools
goatools/godag/relationship_str.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/godag/relationship_str.py#L74-L77
def str_relationships_rev(self, goobj): """Get a string representing the presence or absence of relationships. Ex: pr..""" rel_cur = goobj.relationship_rev return "".join([self.rev2chr[r] if r in rel_cur else '.' for r in self.rels])
[ "def", "str_relationships_rev", "(", "self", ",", "goobj", ")", ":", "rel_cur", "=", "goobj", ".", "relationship_rev", "return", "\"\"", ".", "join", "(", "[", "self", ".", "rev2chr", "[", "r", "]", "if", "r", "in", "rel_cur", "else", "'.'", "for", "r", "in", "self", ".", "rels", "]", ")" ]
Get a string representing the presence or absence of relationships. Ex: pr..
[ "Get", "a", "string", "representing", "the", "presence", "of", "absence", "of", "relationships", ".", "Ex", ":", "pr", ".." ]
python
train
bitesofcode/projex
projex/addon.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/addon.py#L132-L141
def registerAddonModule(cls, module): """ Registers a module to use to import addon subclasses from. :param module | <str> || <module> """ prop = '_{0}__addon_modules'.format(cls.__name__) mods = getattr(cls, prop, set()) mods.add(module) setattr(cls, prop, mods)
[ "def", "registerAddonModule", "(", "cls", ",", "module", ")", ":", "prop", "=", "'_{0}__addon_modules'", ".", "format", "(", "cls", ".", "__name__", ")", "mods", "=", "getattr", "(", "cls", ",", "prop", ",", "set", "(", ")", ")", "mods", ".", "add", "(", "module", ")", "setattr", "(", "cls", ",", "prop", ",", "mods", ")" ]
Registers a module to use to import addon subclasses from. :param module | <str> || <module>
[ "Registers", "a", "module", "to", "use", "to", "import", "addon", "subclasses", "from", ".", ":", "param", "module", "|", "<str", ">", "||", "<module", ">" ]
python
train
arista-eosplus/pyeapi
pyeapi/api/users.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/users.py#L230-L252
def set_privilege(self, name, value=None): """Configures the user privilege value in EOS Args: name (str): The name of the user to create value (int): The privilege value to assign to the user. Valid values are in the range of 0 to 15 Returns: True if the operation was successful otherwise False Raises: TypeError: if the value is not in the valid range """ cmd = 'username %s' % name if value is not None: if not isprivilege(value): raise TypeError('privilege value must be between 0 and 15') cmd += ' privilege %s' % value else: cmd += ' privilege 1' return self.configure(cmd)
[ "def", "set_privilege", "(", "self", ",", "name", ",", "value", "=", "None", ")", ":", "cmd", "=", "'username %s'", "%", "name", "if", "value", "is", "not", "None", ":", "if", "not", "isprivilege", "(", "value", ")", ":", "raise", "TypeError", "(", "'priviledge value must be between 0 and 15'", ")", "cmd", "+=", "' privilege %s'", "%", "value", "else", ":", "cmd", "+=", "' privilege 1'", "return", "self", ".", "configure", "(", "cmd", ")" ]
Configures the user privilege value in EOS Args: name (str): The name of the user to create value (int): The privilege value to assign to the user. Valid values are in the range of 0 to 15 Returns: True if the operation was successful otherwise False Raises: TypeError: if the value is not in the valid range
[ "Configures", "the", "user", "privilege", "value", "in", "EOS" ]
python
train
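A likely call site for the set_privilege record above, assuming a node configured in eapi.conf under the name shown (pyeapi.connect_to and node.api('users') are pyeapi's documented entry points):

    import pyeapi

    node = pyeapi.connect_to('veos01')  # connection name is an assumption
    users = node.api('users')
    users.set_privilege('ops', 8)       # configures: username ops privilege 8
    users.set_privilege('ops')          # value omitted -> privilege 1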
kennethreitz/omnijson
omnijson/packages/simplejson/__init__.py
https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/packages/simplejson/__init__.py#L276-L329
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, use_decimal=False, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. *encoding* determines the encoding used to interpret any :class:`str` objects decoded by this instance (``'utf-8'`` by default). It has no effect when decoding :class:`unicode` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as :class:`unicode`. *object_hook*, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given :class:`dict`. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting). *object_pairs_hook* is an optional function that will be called with the result of any object literal decode with an ordered list of pairs. The return value of *object_pairs_hook* will be used instead of the :class:`dict`. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, :func:`collections.OrderedDict` will remember the order of insertion). If *object_hook* is also defined, the *object_pairs_hook* takes priority. *parse_float*, if specified, will be called with the string of every JSON float to be decoded. By default, this is equivalent to ``float(num_str)``. This can be used to use another datatype or parser for JSON floats (e.g. :class:`decimal.Decimal`). *parse_int*, if specified, will be called with the string of every JSON int to be decoded. By default, this is equivalent to ``int(num_str)``. This can be used to use another datatype or parser for JSON integers (e.g. :class:`float`). *parse_constant*, if specified, will be called with one of the following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used to raise an exception if invalid JSON numbers are encountered. If *use_decimal* is true (default: ``False``) then it implies parse_float=decimal.Decimal for parity with ``dump``. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. """ return loads(fp.read(), encoding=encoding, cls=cls, object_hook=object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, use_decimal=use_decimal, **kw)
[ "def", "load", "(", "fp", ",", "encoding", "=", "None", ",", "cls", "=", "None", ",", "object_hook", "=", "None", ",", "parse_float", "=", "None", ",", "parse_int", "=", "None", ",", "parse_constant", "=", "None", ",", "object_pairs_hook", "=", "None", ",", "use_decimal", "=", "False", ",", "*", "*", "kw", ")", ":", "return", "loads", "(", "fp", ".", "read", "(", ")", ",", "encoding", "=", "encoding", ",", "cls", "=", "cls", ",", "object_hook", "=", "object_hook", ",", "parse_float", "=", "parse_float", ",", "parse_int", "=", "parse_int", ",", "parse_constant", "=", "parse_constant", ",", "object_pairs_hook", "=", "object_pairs_hook", ",", "use_decimal", "=", "use_decimal", ",", "*", "*", "kw", ")" ]
Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. *encoding* determines the encoding used to interpret any :class:`str` objects decoded by this instance (``'utf-8'`` by default). It has no effect when decoding :class:`unicode` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as :class:`unicode`. *object_hook*, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given :class:`dict`. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting). *object_pairs_hook* is an optional function that will be called with the result of any object literal decode with an ordered list of pairs. The return value of *object_pairs_hook* will be used instead of the :class:`dict`. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, :func:`collections.OrderedDict` will remember the order of insertion). If *object_hook* is also defined, the *object_pairs_hook* takes priority. *parse_float*, if specified, will be called with the string of every JSON float to be decoded. By default, this is equivalent to ``float(num_str)``. This can be used to use another datatype or parser for JSON floats (e.g. :class:`decimal.Decimal`). *parse_int*, if specified, will be called with the string of every JSON int to be decoded. By default, this is equivalent to ``int(num_str)``. This can be used to use another datatype or parser for JSON integers (e.g. :class:`float`). *parse_constant*, if specified, will be called with one of the following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used to raise an exception if invalid JSON numbers are encountered. If *use_decimal* is true (default: ``False``) then it implies parse_float=decimal.Decimal for parity with ``dump``. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg.
[ "Deserialize", "fp", "(", "a", ".", "read", "()", "-", "supporting", "file", "-", "like", "object", "containing", "a", "JSON", "document", ")", "to", "a", "Python", "object", "." ]
python
train
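A minimal usage sketch for the record above, using the stdlib json module, whose load() accepts the same object_pairs_hook/parse_float hooks described in the docstring (simplejson's encoding and use_decimal extras are omitted here):

import io
import json
from collections import OrderedDict
from decimal import Decimal

fp = io.StringIO('{"b": 1.1, "a": 2}')
obj = json.load(fp,
                object_pairs_hook=OrderedDict,  # preserve document key order
                parse_float=Decimal)            # JSON floats become Decimal
print(obj)  # OrderedDict([('b', Decimal('1.1')), ('a', 2)])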
tensorflow/tensorboard
tensorboard/plugins/debugger/interactive_debugger_server_lib.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/interactive_debugger_server_lib.py#L48-L52
def _extract_device_name_from_event(event): """Extract device name from a tf.Event proto carrying tensor value.""" plugin_data_content = json.loads( tf.compat.as_str(event.summary.value[0].metadata.plugin_data.content)) return plugin_data_content['device']
[ "def", "_extract_device_name_from_event", "(", "event", ")", ":", "plugin_data_content", "=", "json", ".", "loads", "(", "tf", ".", "compat", ".", "as_str", "(", "event", ".", "summary", ".", "value", "[", "0", "]", ".", "metadata", ".", "plugin_data", ".", "content", ")", ")", "return", "plugin_data_content", "[", "'device'", "]" ]
Extract device name from a tf.Event proto carrying tensor value.
[ "Extract", "device", "name", "from", "a", "tf", ".", "Event", "proto", "carrying", "tensor", "value", "." ]
python
train
senaite/senaite.core
bika/lims/subscribers/auditlog.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/subscribers/auditlog.py#L27-L54
def ObjectTransitionedEventHandler(obj, event): """Object has been transitioned to a new state """ # only snapshot supported objects if not supports_snapshots(obj): return # default transition entry entry = { "modified": DateTime().ISO(), "action": event.action, } # get the last history item history = api.get_review_history(obj, rev=True) if history: entry = history[0] # make transitions also a modification entry timestamp = entry.pop("time", DateTime()) entry["modified"] = timestamp.ISO() entry["action"] = event.action # take a new snapshot take_snapshot(obj, **entry) # reindex the object in the auditlog catalog reindex_object(obj)
[ "def", "ObjectTransitionedEventHandler", "(", "obj", ",", "event", ")", ":", "# only snapshot supported objects", "if", "not", "supports_snapshots", "(", "obj", ")", ":", "return", "# default transition entry", "entry", "=", "{", "\"modified\"", ":", "DateTime", "(", ")", ".", "ISO", "(", ")", ",", "\"action\"", ":", "event", ".", "action", ",", "}", "# get the last history item", "history", "=", "api", ".", "get_review_history", "(", "obj", ",", "rev", "=", "True", ")", "if", "history", ":", "entry", "=", "history", "[", "0", "]", "# make transitions also a modification entry", "timestamp", "=", "entry", ".", "pop", "(", "\"time\"", ",", "DateTime", "(", ")", ")", "entry", "[", "\"modified\"", "]", "=", "timestamp", ".", "ISO", "(", ")", "entry", "[", "\"action\"", "]", "=", "event", ".", "action", "# take a new snapshot", "take_snapshot", "(", "obj", ",", "*", "*", "entry", ")", "# reindex the object in the auditlog catalog", "reindex_object", "(", "obj", ")" ]
Object has been transitioned to a new state
[ "Object", "has", "been", "transitioned", "to", "an", "new", "state" ]
python
train
huggingface/pytorch-pretrained-BERT
pytorch_pretrained_bert/tokenization.py
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L269-L280
def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output)
[ "def", "_tokenize_chinese_chars", "(", "self", ",", "text", ")", ":", "output", "=", "[", "]", "for", "char", "in", "text", ":", "cp", "=", "ord", "(", "char", ")", "if", "self", ".", "_is_chinese_char", "(", "cp", ")", ":", "output", ".", "append", "(", "\" \"", ")", "output", ".", "append", "(", "char", ")", "output", ".", "append", "(", "\" \"", ")", "else", ":", "output", ".", "append", "(", "char", ")", "return", "\"\"", ".", "join", "(", "output", ")" ]
Adds whitespace around any CJK character.
[ "Adds", "whitespace", "around", "any", "CJK", "character", "." ]
python
train
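A standalone sketch of the padding logic in the record above; the codepoint test here covers only the main CJK Unified Ideographs block (U+4E00-U+9FFF), a deliberate simplification of the tokenizer's full _is_chinese_char ranges:

def pad_cjk(text):
    out = []
    for ch in text:
        if 0x4E00 <= ord(ch) <= 0x9FFF:  # simplified CJK check
            out.extend([" ", ch, " "])
        else:
            out.append(ch)
    return "".join(out)

print(pad_cjk("ab中文cd"))  # 'ab 中  文 cd'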
google/grr
grr/core/grr_response_core/lib/parsers/ie_history.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/ie_history.py#L93-L135
def _GetRecord(self, offset, record_size): """Retrieve a single record from the file. Args: offset: offset from start of input_dat where header starts record_size: length of the header according to file (untrusted) Returns: A dict containing a single browser history record. """ record_header = "<4sLQQL" get4 = lambda x: struct.unpack("<L", self.input_dat[x:x + 4])[0] url_offset = struct.unpack("B", self.input_dat[offset + 52:offset + 53])[0] if url_offset in [0xFF, 0xFE]: return None data_offset = get4(offset + 68) data_size = get4(offset + 72) start_pos = offset + data_offset data = struct.unpack("{0}s".format(data_size), self.input_dat[start_pos:start_pos + data_size])[0] fmt = record_header unknown_size = url_offset - struct.calcsize(fmt) fmt += "{0}s".format(unknown_size) fmt += "{0}s".format(record_size - struct.calcsize(fmt)) dat = struct.unpack(fmt, self.input_dat[offset:offset + record_size]) header, blocks, mtime, ctime, ftime, _, url = dat url = url.split(b"\x00")[0].decode("utf-8") if mtime: mtime = mtime // 10 - WIN_UNIX_DIFF_MSECS if ctime: ctime = ctime // 10 - WIN_UNIX_DIFF_MSECS return { "header": header, # the header "blocks": blocks, # number of blocks "urloffset": url_offset, # offset of URL in file "data_offset": data_offset, # offset for start of data "data_size": data_size, # size of data "data": data, # actual data "mtime": mtime, # modified time "ctime": ctime, # created time "ftime": ftime, # file time "url": url # the url visited }
[ "def", "_GetRecord", "(", "self", ",", "offset", ",", "record_size", ")", ":", "record_header", "=", "\"<4sLQQL\"", "get4", "=", "lambda", "x", ":", "struct", ".", "unpack", "(", "\"<L\"", ",", "self", ".", "input_dat", "[", "x", ":", "x", "+", "4", "]", ")", "[", "0", "]", "url_offset", "=", "struct", ".", "unpack", "(", "\"B\"", ",", "self", ".", "input_dat", "[", "offset", "+", "52", ":", "offset", "+", "53", "]", ")", "[", "0", "]", "if", "url_offset", "in", "[", "0xFF", ",", "0xFE", "]", ":", "return", "None", "data_offset", "=", "get4", "(", "offset", "+", "68", ")", "data_size", "=", "get4", "(", "offset", "+", "72", ")", "start_pos", "=", "offset", "+", "data_offset", "data", "=", "struct", ".", "unpack", "(", "\"{0}s\"", ".", "format", "(", "data_size", ")", ",", "self", ".", "input_dat", "[", "start_pos", ":", "start_pos", "+", "data_size", "]", ")", "[", "0", "]", "fmt", "=", "record_header", "unknown_size", "=", "url_offset", "-", "struct", ".", "calcsize", "(", "fmt", ")", "fmt", "+=", "\"{0}s\"", ".", "format", "(", "unknown_size", ")", "fmt", "+=", "\"{0}s\"", ".", "format", "(", "record_size", "-", "struct", ".", "calcsize", "(", "fmt", ")", ")", "dat", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "input_dat", "[", "offset", ":", "offset", "+", "record_size", "]", ")", "header", ",", "blocks", ",", "mtime", ",", "ctime", ",", "ftime", ",", "_", ",", "url", "=", "dat", "url", "=", "url", ".", "split", "(", "b\"\\x00\"", ")", "[", "0", "]", ".", "decode", "(", "\"utf-8\"", ")", "if", "mtime", ":", "mtime", "=", "mtime", "//", "10", "-", "WIN_UNIX_DIFF_MSECS", "if", "ctime", ":", "ctime", "=", "ctime", "//", "10", "-", "WIN_UNIX_DIFF_MSECS", "return", "{", "\"header\"", ":", "header", ",", "# the header", "\"blocks\"", ":", "blocks", ",", "# number of blocks", "\"urloffset\"", ":", "url_offset", ",", "# offset of URL in file", "\"data_offset\"", ":", "data_offset", ",", "# offset for start of data", "\"data_size\"", ":", "data_size", ",", "# size of data", "\"data\"", ":", "data", ",", "# actual data", "\"mtime\"", ":", "mtime", ",", "# modified time", "\"ctime\"", ":", "ctime", ",", "# created time", "\"ftime\"", ":", "ftime", ",", "# file time", "\"url\"", ":", "url", "# the url visited", "}" ]
Retrieve a single record from the file. Args: offset: offset from start of input_dat where header starts record_size: length of the header according to file (untrusted) Returns: A dict containing a single browser history record.
[ "Retrieve", "a", "single", "record", "from", "the", "file", "." ]
python
train
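To make the "<4sLQQL" record header concrete, a toy round-trip on synthetic bytes (not a real index.dat record; the field names follow the comments in the dict returned above):

import struct

header_fmt = "<4sLQQL"  # signature, block count, mtime, ctime, ftime
blob = struct.pack(header_fmt, b"URL ", 2,
                   130000000000000000, 130000000000000001, 0)
print(struct.unpack(header_fmt, blob))
print(struct.calcsize(header_fmt))  # 28 bytes before the variable-length tail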
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L170-L183
def add_res(acc, elem): """ Adds results to the accumulator :param acc: accumulator list or None to start a new one :param elem: element or list of elements to append :return: the updated accumulator list """ if not isinstance(elem, list): elem = [elem] if acc is None: acc = [] for x in elem: acc.append(x) return acc
[ "def", "add_res", "(", "acc", ",", "elem", ")", ":", "if", "not", "isinstance", "(", "elem", ",", "list", ")", ":", "elem", "=", "[", "elem", "]", "if", "acc", "is", "None", ":", "acc", "=", "[", "]", "for", "x", "in", "elem", ":", "acc", ".", "append", "(", "x", ")", "return", "acc" ]
Adds results to the accumulator :param acc: accumulator list or None to start a new one :param elem: element or list of elements to append :return: the updated accumulator list
[ "Adds", "results", "to", "the", "accumulator", ":", "param", "acc", ":", ":", "param", "elem", ":", ":", "return", ":" ]
python
train
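Assuming add_res is in scope as defined above, it normalizes scalars to lists and tolerates a None accumulator:

acc = None
acc = add_res(acc, 1)       # -> [1]
acc = add_res(acc, [2, 3])  # -> [1, 2, 3]
print(acc)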
PMEAL/OpenPNM
openpnm/algorithms/MixedInvasionPercolation.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/MixedInvasionPercolation.py#L64-L140
def setup(self, phase=None, pore_entry_pressure='pore.entry_pressure', throat_entry_pressure='throat.entry_pressure', snap_off='', invade_isolated_Ts=False, late_pore_filling='', late_throat_filling='', cooperative_pore_filling=''): r""" Used to specify necessary arguments to the simulation. This method is useful for resetting the algorithm or applying more explicit control. Parameters ---------- phase : OpenPNM Phase object The Phase object containing the physical properties of the invading fluid. pore_entry_pressure : string The dictionary key on the Phase object where the pore entry pressure values are stored. The default is 'pore.entry_pressure'. throat_entry_pressure : string The dictionary key on the Phase object where the throat entry pressure values are stored. The default is 'throat.entry_pressure'. snap_off : string The dictionary key on the Phase object where the throat snap-off pressure values are stored. invade_isolated_Ts : boolean If True, isolated throats are invaded at the higher invasion pressure of their connected pores. late_pore_filling : string The name of the model used to determine late pore filling as a function of applied pressure. late_throat_filling : string The name of the model used to determine late throat filling as a function of applied pressure. cooperative_pore_filling : string The name of the model used to determine the meniscus properties required for assessing cooperative pore filling. """ if phase: self.settings['phase'] = phase.name if throat_entry_pressure: self.settings['throat_entry_pressure'] = throat_entry_pressure phase = self.project.find_phase(self) self['throat.entry_pressure'] = \ phase[self.settings['throat_entry_pressure']] if len(np.shape(self['throat.entry_pressure'])) > 1: self._bidirectional = True else: self._bidirectional = False if pore_entry_pressure: self.settings['pore_entry_pressure'] = pore_entry_pressure phase = self.project.find_phase(self) self['pore.entry_pressure'] = \ phase[self.settings['pore_entry_pressure']] if snap_off: self.settings['snap_off'] = snap_off if invade_isolated_Ts: self.settings['invade_isolated_Ts'] = invade_isolated_Ts if late_pore_filling: self.settings['late_pore_filling'] = late_pore_filling if late_throat_filling: self.settings['late_throat_filling'] = late_throat_filling if cooperative_pore_filling: self.settings['cooperative_pore_filling'] = \ cooperative_pore_filling self.reset()
[ "def", "setup", "(", "self", ",", "phase", "=", "None", ",", "pore_entry_pressure", "=", "'pore.entry_pressure'", ",", "throat_entry_pressure", "=", "'throat.entry_pressure'", ",", "snap_off", "=", "''", ",", "invade_isolated_Ts", "=", "False", ",", "late_pore_filling", "=", "''", ",", "late_throat_filling", "=", "''", ",", "cooperative_pore_filling", "=", "''", ")", ":", "if", "phase", ":", "self", ".", "settings", "[", "'phase'", "]", "=", "phase", ".", "name", "if", "throat_entry_pressure", ":", "self", ".", "settings", "[", "'throat_entry_pressure'", "]", "=", "throat_entry_pressure", "phase", "=", "self", ".", "project", ".", "find_phase", "(", "self", ")", "self", "[", "'throat.entry_pressure'", "]", "=", "phase", "[", "self", ".", "settings", "[", "'throat_entry_pressure'", "]", "]", "if", "len", "(", "np", ".", "shape", "(", "self", "[", "'throat.entry_pressure'", "]", ")", ")", ">", "1", ":", "self", ".", "_bidirectional", "=", "True", "else", ":", "self", ".", "_bidirectional", "=", "False", "if", "pore_entry_pressure", ":", "self", ".", "settings", "[", "'pore_entry_pressure'", "]", "=", "pore_entry_pressure", "phase", "=", "self", ".", "project", ".", "find_phase", "(", "self", ")", "self", "[", "'pore.entry_pressure'", "]", "=", "phase", "[", "self", ".", "settings", "[", "'pore_entry_pressure'", "]", "]", "if", "snap_off", ":", "self", ".", "settings", "[", "'snap_off'", "]", "=", "snap_off", "if", "invade_isolated_Ts", ":", "self", ".", "settings", "[", "'invade_isolated_Ts'", "]", "=", "invade_isolated_Ts", "if", "late_pore_filling", ":", "self", ".", "settings", "[", "'late_pore_filling'", "]", "=", "late_pore_filling", "if", "late_throat_filling", ":", "self", ".", "settings", "[", "'late_throat_filling'", "]", "=", "late_throat_filling", "if", "cooperative_pore_filling", ":", "self", ".", "settings", "[", "'cooperative_pore_filling'", "]", "=", "cooperative_pore_filling", "self", ".", "reset", "(", ")" ]
r""" Used to specify necessary arguments to the simulation. This method is useful for resetting the algorithm or applying more explicit control. Parameters ---------- phase : OpenPNM Phase object The Phase object containing the physical properties of the invading fluid. pore_entry_pressure : string The dictionary key on the Phase object where the pore entry pressure values are stored. The default is 'pore.entry_pressure'. throat_entry_pressure : string The dictionary key on the Phase object where the throat entry pressure values are stored. The default is 'throat.entry_pressure'. snap_off : string The dictionary key on the Phase object where the throat snap-off pressure values are stored. invade_isolated_Ts : boolean If True, isolated throats are invaded at the higher invasion pressure of their connected pores. late_pore_filling : string The name of the model used to determine late pore filling as a function of applied pressure. late_throat_filling : string The name of the model used to determine late throat filling as a function of applied pressure. cooperative_pore_filling : string The name of the model used to determine the meniscus properties required for assessing cooperative pore filling.
[ "r", "Used", "to", "specify", "necessary", "arguments", "to", "the", "simulation", ".", "This", "method", "is", "useful", "for", "resetting", "the", "algorithm", "or", "applying", "more", "explicit", "control", "." ]
python
train
dmlc/xgboost
python-package/xgboost/core.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L498-L527
def _init_from_dt(self, data, nthread): """ Initialize data from a datatable Frame. """ ptrs = (ctypes.c_void_p * data.ncols)() if hasattr(data, "internal") and hasattr(data.internal, "column"): # datatable>0.8.0 for icol in range(data.ncols): col = data.internal.column(icol) ptr = col.data_pointer ptrs[icol] = ctypes.c_void_p(ptr) else: # datatable<=0.8.0 from datatable.internal import frame_column_data_r # pylint: disable=no-name-in-module,import-error for icol in range(data.ncols): ptrs[icol] = frame_column_data_r(data, icol) # always return stypes for dt ingestion feature_type_strings = (ctypes.c_char_p * data.ncols)() for icol in range(data.ncols): feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8')) handle = ctypes.c_void_p() _check_call(_LIB.XGDMatrixCreateFromDT( ptrs, feature_type_strings, c_bst_ulong(data.shape[0]), c_bst_ulong(data.shape[1]), ctypes.byref(handle), nthread)) self.handle = handle
[ "def", "_init_from_dt", "(", "self", ",", "data", ",", "nthread", ")", ":", "ptrs", "=", "(", "ctypes", ".", "c_void_p", "*", "data", ".", "ncols", ")", "(", ")", "if", "hasattr", "(", "data", ",", "\"internal\"", ")", "and", "hasattr", "(", "data", ".", "internal", ",", "\"column\"", ")", ":", "# datatable>0.8.0", "for", "icol", "in", "range", "(", "data", ".", "ncols", ")", ":", "col", "=", "data", ".", "internal", ".", "column", "(", "icol", ")", "ptr", "=", "col", ".", "data_pointer", "ptrs", "[", "icol", "]", "=", "ctypes", ".", "c_void_p", "(", "ptr", ")", "else", ":", "# datatable<=0.8.0", "from", "datatable", ".", "internal", "import", "frame_column_data_r", "# pylint: disable=no-name-in-module,import-error", "for", "icol", "in", "range", "(", "data", ".", "ncols", ")", ":", "ptrs", "[", "icol", "]", "=", "frame_column_data_r", "(", "data", ",", "icol", ")", "# always return stypes for dt ingestion", "feature_type_strings", "=", "(", "ctypes", ".", "c_char_p", "*", "data", ".", "ncols", ")", "(", ")", "for", "icol", "in", "range", "(", "data", ".", "ncols", ")", ":", "feature_type_strings", "[", "icol", "]", "=", "ctypes", ".", "c_char_p", "(", "data", ".", "stypes", "[", "icol", "]", ".", "name", ".", "encode", "(", "'utf-8'", ")", ")", "handle", "=", "ctypes", ".", "c_void_p", "(", ")", "_check_call", "(", "_LIB", ".", "XGDMatrixCreateFromDT", "(", "ptrs", ",", "feature_type_strings", ",", "c_bst_ulong", "(", "data", ".", "shape", "[", "0", "]", ")", ",", "c_bst_ulong", "(", "data", ".", "shape", "[", "1", "]", ")", ",", "ctypes", ".", "byref", "(", "handle", ")", ",", "nthread", ")", ")", "self", ".", "handle", "=", "handle" ]
Initialize data from a datatable Frame.
[ "Initialize", "data", "from", "a", "datatable", "Frame", "." ]
python
train
christian-oudard/htmltreediff
htmltreediff/diff_core.py
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/diff_core.py#L224-L228
def match_indices(match): """Yield index tuples (old_index, new_index) for each place in the match.""" a, b, size = match for i in range(size): yield a + i, b + i
[ "def", "match_indices", "(", "match", ")", ":", "a", ",", "b", ",", "size", "=", "match", "for", "i", "in", "range", "(", "size", ")", ":", "yield", "a", "+", "i", ",", "b", "+", "i" ]
Yield index tuples (old_index, new_index) for each place in the match.
[ "Yield", "index", "tuples", "(", "old_index", "new_index", ")", "for", "each", "place", "in", "the", "match", "." ]
python
train
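Assuming match_indices is in scope as defined above, difflib's Match tuples (a, b, size) are a natural input for it:

from difflib import SequenceMatcher

sm = SequenceMatcher(None, "abcdef", "xbcdey")
m = sm.find_longest_match(0, 6, 0, 6)  # Match(a=1, b=1, size=4) -> "bcde"
print(list(match_indices(m)))          # [(1, 1), (2, 2), (3, 3), (4, 4)]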
pandas-dev/pandas
pandas/core/arrays/_ranges.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/_ranges.py#L149-L187
def _generate_range_overflow_safe_signed(endpoint, periods, stride, side): """ A special case for _generate_range_overflow_safe where `periods * stride` can be calculated without overflowing int64 bounds. """ assert side in ['start', 'end'] if side == 'end': stride *= -1 with np.errstate(over="raise"): addend = np.int64(periods) * np.int64(stride) try: # easy case with no overflows return np.int64(endpoint) + addend except (FloatingPointError, OverflowError): # with endpoint negative and addend positive we risk # FloatingPointError; with reversed signs we risk OverflowError pass # if stride and endpoint had opposite signs, then endpoint + addend # should never overflow. so they must have the same signs assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0) if stride > 0: # watch out for very special case in which we just slightly # exceed implementation bounds, but when passing the result to # np.arange will get a result slightly within the bounds assert endpoint >= 0 result = np.uint64(endpoint) + np.uint64(addend) i64max = np.uint64(np.iinfo(np.int64).max) assert result > i64max if result <= i64max + np.uint64(stride): return result raise OutOfBoundsDatetime('Cannot generate range with ' '{side}={endpoint} and ' 'periods={periods}' .format(side=side, endpoint=endpoint, periods=periods))
[ "def", "_generate_range_overflow_safe_signed", "(", "endpoint", ",", "periods", ",", "stride", ",", "side", ")", ":", "assert", "side", "in", "[", "'start'", ",", "'end'", "]", "if", "side", "==", "'end'", ":", "stride", "*=", "-", "1", "with", "np", ".", "errstate", "(", "over", "=", "\"raise\"", ")", ":", "addend", "=", "np", ".", "int64", "(", "periods", ")", "*", "np", ".", "int64", "(", "stride", ")", "try", ":", "# easy case with no overflows", "return", "np", ".", "int64", "(", "endpoint", ")", "+", "addend", "except", "(", "FloatingPointError", ",", "OverflowError", ")", ":", "# with endpoint negative and addend positive we risk", "# FloatingPointError; with reversed signed we risk OverflowError", "pass", "# if stride and endpoint had opposite signs, then endpoint + addend", "# should never overflow. so they must have the same signs", "assert", "(", "stride", ">", "0", "and", "endpoint", ">=", "0", ")", "or", "(", "stride", "<", "0", "and", "endpoint", "<=", "0", ")", "if", "stride", ">", "0", ":", "# watch out for very special case in which we just slightly", "# exceed implementation bounds, but when passing the result to", "# np.arange will get a result slightly within the bounds", "assert", "endpoint", ">=", "0", "result", "=", "np", ".", "uint64", "(", "endpoint", ")", "+", "np", ".", "uint64", "(", "addend", ")", "i64max", "=", "np", ".", "uint64", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "max", ")", "assert", "result", ">", "i64max", "if", "result", "<=", "i64max", "+", "np", ".", "uint64", "(", "stride", ")", ":", "return", "result", "raise", "OutOfBoundsDatetime", "(", "'Cannot generate range with '", "'{side}={endpoint} and '", "'periods={periods}'", ".", "format", "(", "side", "=", "side", ",", "endpoint", "=", "endpoint", ",", "periods", "=", "periods", ")", ")" ]
A special case for _generate_range_overflow_safe where `periods * stride` can be calculated without overflowing int64 bounds.
[ "A", "special", "case", "for", "_generate_range_overflow_safe", "where", "periods", "*", "stride", "can", "be", "calculated", "without", "overflowing", "int64", "bounds", "." ]
python
train
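A small numpy demonstration of the unsigned-widening trick relied on above: a sum that would overflow int64 is still representable in uint64.

import numpy as np

i64max = np.iinfo(np.int64).max            # 9223372036854775807
result = np.uint64(i64max - 1) + np.uint64(3)
print(result)                              # 9223372036854775809
print(result > np.uint64(i64max))          # True: past int64, fine as uint64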
dhermes/bezier
src/bezier/_algebraic_intersection.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_algebraic_intersection.py#L1304-L1358
def poly_to_power_basis(bezier_coeffs): """Convert a B |eacute| zier curve to polynomial in power basis. .. note:: This assumes, but does not verify, that the "B |eacute| zier degree" matches the true degree of the curve. Callers can guarantee this by calling :func:`.full_reduce`. Args: bezier_coeffs (numpy.ndarray): A 1D array of coefficients in the Bernstein basis. Returns: numpy.ndarray: 1D array of coefficients in monomial basis. Raises: .UnsupportedDegree: If the degree of the curve is not among 0, 1, 2 or 3. """ num_coeffs, = bezier_coeffs.shape if num_coeffs == 1: return bezier_coeffs elif num_coeffs == 2: # C0 (1 - s) + C1 s = C0 + (C1 - C0) s coeff0, coeff1 = bezier_coeffs return np.asfortranarray([coeff0, coeff1 - coeff0]) elif num_coeffs == 3: # C0 (1 - s)^2 + C1 2 (1 - s) s + C2 s^2 # = C0 + 2(C1 - C0) s + (C2 - 2 C1 + C0) s^2 coeff0, coeff1, coeff2 = bezier_coeffs return np.asfortranarray( [coeff0, 2.0 * (coeff1 - coeff0), coeff2 - 2.0 * coeff1 + coeff0] ) elif num_coeffs == 4: # C0 (1 - s)^3 + C1 3 (1 - s)^2 + C2 3 (1 - s) s^2 + C3 s^3 # = C0 + 3(C1 - C0) s + 3(C2 - 2 C1 + C0) s^2 + # (C3 - 3 C2 + 3 C1 - C0) s^3 coeff0, coeff1, coeff2, coeff3 = bezier_coeffs return np.asfortranarray( [ coeff0, 3.0 * (coeff1 - coeff0), 3.0 * (coeff2 - 2.0 * coeff1 + coeff0), coeff3 - 3.0 * coeff2 + 3.0 * coeff1 - coeff0, ] ) else: raise _helpers.UnsupportedDegree( num_coeffs - 1, supported=(0, 1, 2, 3) )
[ "def", "poly_to_power_basis", "(", "bezier_coeffs", ")", ":", "num_coeffs", ",", "=", "bezier_coeffs", ".", "shape", "if", "num_coeffs", "==", "1", ":", "return", "bezier_coeffs", "elif", "num_coeffs", "==", "2", ":", "# C0 (1 - s) + C1 s = C0 + (C1 - C0) s", "coeff0", ",", "coeff1", "=", "bezier_coeffs", "return", "np", ".", "asfortranarray", "(", "[", "coeff0", ",", "coeff1", "-", "coeff0", "]", ")", "elif", "num_coeffs", "==", "3", ":", "# C0 (1 - s)^2 + C1 2 (1 - s) s + C2 s^2", "# = C0 + 2(C1 - C0) s + (C2 - 2 C1 + C0) s^2", "coeff0", ",", "coeff1", ",", "coeff2", "=", "bezier_coeffs", "return", "np", ".", "asfortranarray", "(", "[", "coeff0", ",", "2.0", "*", "(", "coeff1", "-", "coeff0", ")", ",", "coeff2", "-", "2.0", "*", "coeff1", "+", "coeff0", "]", ")", "elif", "num_coeffs", "==", "4", ":", "# C0 (1 - s)^3 + C1 3 (1 - s)^2 + C2 3 (1 - s) s^2 + C3 s^3", "# = C0 + 3(C1 - C0) s + 3(C2 - 2 C1 + C0) s^2 +", "# (C3 - 3 C2 + 3 C1 - C0) s^3", "coeff0", ",", "coeff1", ",", "coeff2", ",", "coeff3", "=", "bezier_coeffs", "return", "np", ".", "asfortranarray", "(", "[", "coeff0", ",", "3.0", "*", "(", "coeff1", "-", "coeff0", ")", ",", "3.0", "*", "(", "coeff2", "-", "2.0", "*", "coeff1", "+", "coeff0", ")", ",", "coeff3", "-", "3.0", "*", "coeff2", "+", "3.0", "*", "coeff1", "-", "coeff0", ",", "]", ")", "else", ":", "raise", "_helpers", ".", "UnsupportedDegree", "(", "num_coeffs", "-", "1", ",", "supported", "=", "(", "0", ",", "1", ",", "2", ",", "3", ")", ")" ]
Convert a B |eacute| zier curve to polynomial in power basis. .. note:: This assumes, but does not verify, that the "B |eacute| zier degree" matches the true degree of the curve. Callers can guarantee this by calling :func:`.full_reduce`. Args: bezier_coeffs (numpy.ndarray): A 1D array of coefficients in the Bernstein basis. Returns: numpy.ndarray: 1D array of coefficients in monomial basis. Raises: .UnsupportedDegree: If the degree of the curve is not among 0, 1, 2 or 3.
[ "Convert", "a", "B", "|eacute|", "zier", "curve", "to", "polynomial", "in", "power", "basis", "." ]
python
train
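Assuming poly_to_power_basis is in scope as defined above (with numpy imported as np), a quick numerical check of the quadratic case: both bases must agree at any s.

import numpy as np

coeffs = np.asfortranarray([1.0, 2.0, 4.0])  # Bernstein C0, C1, C2
power = poly_to_power_basis(coeffs)          # array([1., 2., 1.])
s = 0.3
bernstein = (1 - s)**2 * 1.0 + 2 * (1 - s) * s * 2.0 + s**2 * 4.0
monomial = power[0] + power[1] * s + power[2] * s**2
print(np.isclose(bernstein, monomial))       # True (both 1.69)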
python-diamond/Diamond
src/diamond/handler/multigraphite.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/handler/multigraphite.py#L36-L52
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(MultiGraphiteHandler, self).get_default_config_help() config.update({ 'host': 'Hostname, Hostname, Hostname', 'port': 'Port', 'proto': 'udp or tcp', 'timeout': '', 'batch': 'How many to store before sending to the graphite server', 'max_backlog_multiplier': 'how many batches to store before trimming', # NOQA 'trim_backlog_multiplier': 'Trim down how many batches', }) return config
[ "def", "get_default_config_help", "(", "self", ")", ":", "config", "=", "super", "(", "MultiGraphiteHandler", ",", "self", ")", ".", "get_default_config_help", "(", ")", "config", ".", "update", "(", "{", "'host'", ":", "'Hostname, Hostname, Hostname'", ",", "'port'", ":", "'Port'", ",", "'proto'", ":", "'udp or tcp'", ",", "'timeout'", ":", "''", ",", "'batch'", ":", "'How many to store before sending to the graphite server'", ",", "'max_backlog_multiplier'", ":", "'how many batches to store before trimming'", ",", "# NOQA", "'trim_backlog_multiplier'", ":", "'Trim down how many batches'", ",", "}", ")", "return", "config" ]
Returns the help text for the configuration options for this handler
[ "Returns", "the", "help", "text", "for", "the", "configuration", "options", "for", "this", "handler" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L300-L310
def get_bool_relative(strings: Sequence[str], prefix1: str, delta: int, prefix2: str, ignoreleadingcolon: bool = False) -> Optional[bool]: """ Fetches a boolean parameter via :func:`get_string_relative`. """ return get_bool_raw(get_string_relative( strings, prefix1, delta, prefix2, ignoreleadingcolon=ignoreleadingcolon))
[ "def", "get_bool_relative", "(", "strings", ":", "Sequence", "[", "str", "]", ",", "prefix1", ":", "str", ",", "delta", ":", "int", ",", "prefix2", ":", "str", ",", "ignoreleadingcolon", ":", "bool", "=", "False", ")", "->", "Optional", "[", "bool", "]", ":", "return", "get_bool_raw", "(", "get_string_relative", "(", "strings", ",", "prefix1", ",", "delta", ",", "prefix2", ",", "ignoreleadingcolon", "=", "ignoreleadingcolon", ")", ")" ]
Fetches a boolean parameter via :func:`get_string_relative`.
[ "Fetches", "a", "boolean", "parameter", "via", ":", "func", ":", "get_string_relative", "." ]
python
train
pydanny/simplicity
simplicity.py
https://github.com/pydanny/simplicity/blob/aef4ce39b0965b8d333c67c9d6ec5baecee9c617/simplicity.py#L31-L92
def rst_to_json(text): """ I convert Restructured Text with field lists into Dictionaries! TODO: Convert to text node approach. """ records = [] last_type = None key = None data = {} directive = False lines = text.splitlines() for index, line in enumerate(lines): # check for directives if len(line) and line.strip().startswith(".."): directive = True continue # set the title if len(line) and (line[0] in string.ascii_letters or line[0].isdigit()): directive = False try: if lines[index + 1][0] not in DIVIDERS: continue except IndexError: continue data = text_cleanup(data, key, last_type) data = {"title": line.strip()} records.append( data ) continue # Grab standard fields (int, string, float) if len(line) and line[0].startswith(":"): data = text_cleanup(data, key, last_type) index = line.index(":", 1) key = line[1:index] value = line[index + 1:].strip() data[key], last_type = type_converter(value) directive = False continue # Work on multi-line strings if len(line) and line[0].startswith(" ") and directive == False: if not isinstance(data[key], str): # Not a string so continue on continue value = line.strip() if not len(value): # empty string, continue on continue # add next line data[key] += "\n{}".format(value) continue if last_type == STRING_TYPE and not len(line): if key in data.keys(): data[key] += "\n" return json.dumps(records)
[ "def", "rst_to_json", "(", "text", ")", ":", "records", "=", "[", "]", "last_type", "=", "None", "key", "=", "None", "data", "=", "{", "}", "directive", "=", "False", "lines", "=", "text", ".", "splitlines", "(", ")", "for", "index", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "# check for directives", "if", "len", "(", "line", ")", "and", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"..\"", ")", ":", "directive", "=", "True", "continue", "# set the title", "if", "len", "(", "line", ")", "and", "(", "line", "[", "0", "]", "in", "string", ".", "ascii_letters", "or", "line", "[", "0", "]", ".", "isdigit", "(", ")", ")", ":", "directive", "=", "False", "try", ":", "if", "lines", "[", "index", "+", "1", "]", "[", "0", "]", "not", "in", "DIVIDERS", ":", "continue", "except", "IndexError", ":", "continue", "data", "=", "text_cleanup", "(", "data", ",", "key", ",", "last_type", ")", "data", "=", "{", "\"title\"", ":", "line", ".", "strip", "(", ")", "}", "records", ".", "append", "(", "data", ")", "continue", "# Grab standard fields (int, string, float)", "if", "len", "(", "line", ")", "and", "line", "[", "0", "]", ".", "startswith", "(", "\":\"", ")", ":", "data", "=", "text_cleanup", "(", "data", ",", "key", ",", "last_type", ")", "index", "=", "line", ".", "index", "(", "\":\"", ",", "1", ")", "key", "=", "line", "[", "1", ":", "index", "]", "value", "=", "line", "[", "index", "+", "1", ":", "]", ".", "strip", "(", ")", "data", "[", "key", "]", ",", "last_type", "=", "type_converter", "(", "value", ")", "directive", "=", "False", "continue", "# Work on multi-line strings", "if", "len", "(", "line", ")", "and", "line", "[", "0", "]", ".", "startswith", "(", "\" \"", ")", "and", "directive", "==", "False", ":", "if", "not", "isinstance", "(", "data", "[", "key", "]", ",", "str", ")", ":", "# Not a string so continue on", "continue", "value", "=", "line", ".", "strip", "(", ")", "if", "not", "len", "(", "value", ")", ":", "# empty string, continue on", "continue", "# add next line", "data", "[", "key", "]", "+=", "\"\\n{}\"", ".", "format", "(", "value", ")", "continue", "if", "last_type", "==", "STRING_TYPE", "and", "not", "len", "(", "line", ")", ":", "if", "key", "in", "data", ".", "keys", "(", ")", ":", "data", "[", "key", "]", "+=", "\"\\n\"", "return", "json", ".", "dumps", "(", "records", ")" ]
I convert Restructured Text with field lists into Dictionaries! TODO: Convert to text node approach.
[ "I", "convert", "Restructured", "Text", "with", "field", "lists", "into", "Dictionaries!" ]
python
train
mosesschwartz/scrypture
scrypture/scrypture.py
https://github.com/mosesschwartz/scrypture/blob/d51eb0c9835a5122a655078268185ce8ab9ec86a/scrypture/scrypture.py#L241-L254
def order_by_header(table, headers): '''Convert a list of dicts to a list of OrderedDicts ordered by headers''' ordered_table = [] for row in table: # Tricky list comprehension got tricky when needing special handling # Let's do this the simplest way we can: row = {k:v for k,v in row.items() if k in headers} for h in headers: if h not in row: row[h] = '' ordered_row = OrderedDict(sorted(row.items(), key=lambda x:headers.index(x[0]))) ordered_table.append(ordered_row) return ordered_table
[ "def", "order_by_header", "(", "table", ",", "headers", ")", ":", "ordered_table", "=", "[", "]", "for", "row", "in", "table", ":", "# Tricky list comprehension got tricky when needing special handling", "# Lets do this the simplest way we can:", "row", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "row", ".", "items", "(", ")", "if", "k", "in", "headers", "}", "for", "h", "in", "headers", ":", "if", "h", "not", "in", "row", ":", "row", "[", "h", "]", "=", "''", "ordered_row", "=", "OrderedDict", "(", "sorted", "(", "row", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "headers", ".", "index", "(", "x", "[", "0", "]", ")", ")", ")", "ordered_table", ".", "append", "(", "ordered_row", ")", "return", "ordered_table" ]
Convert a list of dicts to a list of OrderedDicts ordered by headers
[ "Convert", "a", "list", "of", "dicts", "to", "a", "list", "or", "OrderedDicts", "ordered", "by", "headers" ]
python
train
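Assuming order_by_header is in scope as defined above (it requires OrderedDict from collections), rows are trimmed to the headers, padded with empty strings, and reordered:

from collections import OrderedDict  # needed by order_by_header

table = [{"b": 2, "a": 1, "junk": 9}, {"a": 3}]
print(order_by_header(table, ["a", "b"]))
# [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3), ('b', '')])]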
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L1806-L1824
def del_host_comment(self, comment_id): """Delete a host comment Format of the line that triggers function call:: DEL_HOST_COMMENT;<comment_id> :param comment_id: comment id to delete :type comment_id: int :return: None """ for item in self.daemon.hosts: if comment_id in item.comments: item.del_comment(comment_id) self.send_an_element(item.get_update_status_brok()) break else: self.send_an_element(make_monitoring_log( 'warning', 'DEL_HOST_COMMENT: comment id: %s does not exist ' 'and cannot be deleted.' % comment_id))
[ "def", "del_host_comment", "(", "self", ",", "comment_id", ")", ":", "for", "item", "in", "self", ".", "daemon", ".", "hosts", ":", "if", "comment_id", "in", "item", ".", "comments", ":", "item", ".", "del_comment", "(", "comment_id", ")", "self", ".", "send_an_element", "(", "item", ".", "get_update_status_brok", "(", ")", ")", "break", "else", ":", "self", ".", "send_an_element", "(", "make_monitoring_log", "(", "'warning'", ",", "'DEL_HOST_COMMENT: comment id: %s does not exist '", "'and cannot be deleted.'", "%", "comment_id", ")", ")" ]
Delete a host comment Format of the line that triggers function call:: DEL_HOST_COMMENT;<comment_id> :param comment_id: comment id to delete :type comment_id: int :return: None
[ "Delete", "a", "host", "comment", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/wrapper.py
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/wrapper.py#L46-L59
def libdmtx_function(fname, restype, *args): """Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: ctypes.CFUNCTYPE: A wrapper around the function. """ prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libdmtx()))
[ "def", "libdmtx_function", "(", "fname", ",", "restype", ",", "*", "args", ")", ":", "prototype", "=", "CFUNCTYPE", "(", "restype", ",", "*", "args", ")", "return", "prototype", "(", "(", "fname", ",", "load_libdmtx", "(", ")", ")", ")" ]
Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: ctypes.CFUNCTYPE: A wrapper around the function.
[ "Returns", "a", "foreign", "function", "exported", "by", "libdmtx", "." ]
python
train
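The same prototype((name, dll)) binding pattern, demonstrated against libm instead of libdmtx; the library lookup below is POSIX-oriented and an illustration only:

from ctypes import CFUNCTYPE, CDLL, c_double
from ctypes.util import find_library

libm = CDLL(find_library("m"))
prototype = CFUNCTYPE(c_double, c_double)  # restype first, then argtypes
cos = prototype(("cos", libm))             # bind the exported symbol
print(cos(0.0))                            # 1.0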
gplepage/lsqfit
src/lsqfit/__init__.py
https://github.com/gplepage/lsqfit/blob/6a57fd687632c175fccb47d8e8e943cda5e9ce9d/src/lsqfit/__init__.py#L1658-L1665
def _unpack_gvars(g): """ Unpack collection of GVars to BufferDict or numpy array. """ if g is not None: g = _gvar.gvar(g) if not hasattr(g, 'flat'): # must be a scalar (ie, not an array and not a dictionary) g = numpy.asarray(g) return g
[ "def", "_unpack_gvars", "(", "g", ")", ":", "if", "g", "is", "not", "None", ":", "g", "=", "_gvar", ".", "gvar", "(", "g", ")", "if", "not", "hasattr", "(", "g", ",", "'flat'", ")", ":", "# must be a scalar (ie, not an array and not a dictionary)", "g", "=", "numpy", ".", "asarray", "(", "g", ")", "return", "g" ]
Unpack collection of GVars to BufferDict or numpy array.
[ "Unpack", "collection", "of", "GVars", "to", "BufferDict", "or", "numpy", "array", "." ]
python
train
briancappello/flask-unchained
flask_unchained/unchained.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/unchained.py#L550-L578
def template_global(self, arg: Optional[Callable] = None, *, name: Optional[str] = None, pass_context: bool = False, inject: Optional[Union[bool, Iterable[str]]] = None, safe: bool = False, ) -> Callable: """ Decorator to mark a function as a Jinja template global (tag). :param name: The name of the tag, if different from the function name. :param pass_context: Whether or not to pass the template context into the tag. If ``True``, the first argument must be the context. :param inject: Whether or not this tag needs any dependencies injected. :param safe: Whether or not to mark the output of this tag as html-safe. """ def wrapper(fn): fn = _inject(fn, inject) if safe: fn = _make_safe(fn) if pass_context: fn = jinja2.contextfunction(fn) self._defer(lambda app: app.add_template_global(fn, name=name)) return fn if callable(arg): return wrapper(arg) return wrapper
[ "def", "template_global", "(", "self", ",", "arg", ":", "Optional", "[", "Callable", "]", "=", "None", ",", "*", ",", "name", ":", "Optional", "[", "str", "]", "=", "None", ",", "pass_context", ":", "bool", "=", "False", ",", "inject", ":", "Optional", "[", "Union", "[", "bool", ",", "Iterable", "[", "str", "]", "]", "]", "=", "None", ",", "safe", ":", "bool", "=", "False", ",", ")", "->", "Callable", ":", "def", "wrapper", "(", "fn", ")", ":", "fn", "=", "_inject", "(", "fn", ",", "inject", ")", "if", "safe", ":", "fn", "=", "_make_safe", "(", "fn", ")", "if", "pass_context", ":", "fn", "=", "jinja2", ".", "contextfunction", "(", "fn", ")", "self", ".", "_defer", "(", "lambda", "app", ":", "app", ".", "add_template_global", "(", "fn", ",", "name", "=", "name", ")", ")", "return", "fn", "if", "callable", "(", "arg", ")", ":", "return", "wrapper", "(", "arg", ")", "return", "wrapper" ]
Decorator to mark a function as a Jinja template global (tag). :param name: The name of the tag, if different from the function name. :param pass_context: Whether or not to pass the template context into the tag. If ``True``, the first argument must be the context. :param inject: Whether or not this tag needs any dependencies injected. :param safe: Whether or not to mark the output of this tag as html-safe.
[ "Decorator", "to", "mark", "a", "function", "as", "a", "Jinja", "template", "global", "(", "tag", ")", "." ]
python
train
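The underlying mechanism minus the Flask Unchained plumbing: a plain Jinja2 Environment exposes template globals through its globals mapping (a rough analogue, not this decorator's implementation):

from jinja2 import Environment

env = Environment()
env.globals["shout"] = lambda s: s.upper()  # register a template global
print(env.from_string("{{ shout('hello') }}").render())  # HELLO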
GGiecold/Cluster_Ensembles
src/Cluster_Ensembles/Cluster_Ensembles.py
https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L218-L237
def load_hypergraph_adjacency(hdf5_file_name): """ Parameters ---------- hdf5_file_name : file handle or string Returns ------- hypergraph_adjacency : compressed sparse row matrix """ with tables.open_file(hdf5_file_name, 'r+') as fileh: pars = [] for par in ('data', 'indices', 'indptr', 'shape'): pars.append(getattr(fileh.root.consensus_group, par).read()) hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3]) return hypergraph_adjacency
[ "def", "load_hypergraph_adjacency", "(", "hdf5_file_name", ")", ":", "with", "tables", ".", "open_file", "(", "hdf5_file_name", ",", "'r+'", ")", "as", "fileh", ":", "pars", "=", "[", "]", "for", "par", "in", "(", "'data'", ",", "'indices'", ",", "'indptr'", ",", "'shape'", ")", ":", "pars", ".", "append", "(", "getattr", "(", "fileh", ".", "root", ".", "consensus_group", ",", "par", ")", ".", "read", "(", ")", ")", "hypergraph_adjacency", "=", "scipy", ".", "sparse", ".", "csr_matrix", "(", "tuple", "(", "pars", "[", ":", "3", "]", ")", ",", "shape", "=", "pars", "[", "3", "]", ")", "return", "hypergraph_adjacency" ]
Parameters ---------- hdf5_file_name : file handle or string Returns ------- hypergraph_adjacency : compressed sparse row matrix
[ "Parameters", "----------", "hdf5_file_name", ":", "file", "handle", "or", "string", "Returns", "-------", "hypergraph_adjacency", ":", "compressed", "sparse", "row", "matrix" ]
python
train
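The CSR reconstruction step in isolation, with hand-written components standing in for the four arrays read from HDF5 (scipy assumed available):

import numpy as np
import scipy.sparse

data = np.array([1, 1, 1])
indices = np.array([0, 2, 1])  # column index of each stored value
indptr = np.array([0, 2, 3])   # row i owns data[indptr[i]:indptr[i+1]]
adj = scipy.sparse.csr_matrix((data, indices, indptr), shape=(2, 3))
print(adj.toarray())
# [[1 0 1]
#  [0 1 0]]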
knipknap/exscript
Exscript/account.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/account.py#L508-L518
def get_account_from_name(self, name): """ Returns the account with the given name. :type name: string :param name: The name of the account. """ for account in self.accounts: if account.get_name() == name: return account return None
[ "def", "get_account_from_name", "(", "self", ",", "name", ")", ":", "for", "account", "in", "self", ".", "accounts", ":", "if", "account", ".", "get_name", "(", ")", "==", "name", ":", "return", "account", "return", "None" ]
Returns the account with the given name. :type name: string :param name: The name of the account.
[ "Returns", "the", "account", "with", "the", "given", "name", "." ]
python
train
mogproject/mog-commons-python
src/mog_commons/collection.py
https://github.com/mogproject/mog-commons-python/blob/951cf0fa9a56248b4d45be720be25f1d4b7e1bff/src/mog_commons/collection.py#L14-L17
def get_single_key(d): """Get a key from a dict which contains just one item.""" assert len(d) == 1, 'Single-item dict must have just one item, not %d.' % len(d) return next(six.iterkeys(d))
[ "def", "get_single_key", "(", "d", ")", ":", "assert", "len", "(", "d", ")", "==", "1", ",", "'Single-item dict must have just one item, not %d.'", "%", "len", "(", "d", ")", "return", "next", "(", "six", ".", "iterkeys", "(", "d", ")", ")" ]
Get a key from a dict which contains just one item.
[ "Get", "a", "key", "from", "a", "dict", "which", "contains", "just", "one", "item", "." ]
python
train
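Assuming get_single_key is in scope as defined above (it depends on six); on Python 3 the six call reduces to next(iter(d)):

print(get_single_key({"only": 42}))   # 'only'
# get_single_key({"a": 1, "b": 2}) raises AssertionError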
evhub/coconut
coconut/compiler/compiler.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1084-L1086
def repl_proc(self, inputstring, log=True, **kwargs): """Process using replprocs.""" return self.apply_procs(self.replprocs, kwargs, inputstring, log=log)
[ "def", "repl_proc", "(", "self", ",", "inputstring", ",", "log", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "apply_procs", "(", "self", ".", "replprocs", ",", "kwargs", ",", "inputstring", ",", "log", "=", "log", ")" ]
Process using replprocs.
[ "Process", "using", "replprocs", "." ]
python
train
cackharot/suds-py3
suds/bindings/multiref.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/bindings/multiref.py#L95-L109
def build_catalog(self, body): """ Create the I{catalog} of multiref nodes by id and the list of non-multiref nodes. @param body: A soap envelope body node. @type body: L{Element} """ for child in body.children: if self.soaproot(child): self.nodes.append(child) id = child.get('id') if id is None: continue key = '#%s' % id self.catalog[key] = child
[ "def", "build_catalog", "(", "self", ",", "body", ")", ":", "for", "child", "in", "body", ".", "children", ":", "if", "self", ".", "soaproot", "(", "child", ")", ":", "self", ".", "nodes", ".", "append", "(", "child", ")", "id", "=", "child", ".", "get", "(", "'id'", ")", "if", "id", "is", "None", ":", "continue", "key", "=", "'#%s'", "%", "id", "self", ".", "catalog", "[", "key", "]", "=", "child" ]
Create the I{catalog} of multiref nodes by id and the list of non-multiref nodes. @param body: A soap envelope body node. @type body: L{Element}
[ "Create", "the", "I", "{", "catalog", "}", "of", "multiref", "nodes", "by", "id", "and", "the", "list", "of", "non", "-", "multiref", "nodes", "." ]
python
train
lambdamusic/Ontospy
ontospy/core/ontospy.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L953-L966
def ontologyClassTree(self): """ Returns a dict representing the ontology tree Top level = {0:[top classes]} Multi inheritance is represented explicitly """ treedict = {} if self.all_classes: treedict[0] = self.toplayer_classes for element in self.all_classes: if element.children(): treedict[element] = element.children() return treedict return treedict
[ "def", "ontologyClassTree", "(", "self", ")", ":", "treedict", "=", "{", "}", "if", "self", ".", "all_classes", ":", "treedict", "[", "0", "]", "=", "self", ".", "toplayer_classes", "for", "element", "in", "self", ".", "all_classes", ":", "if", "element", ".", "children", "(", ")", ":", "treedict", "[", "element", "]", "=", "element", ".", "children", "(", ")", "return", "treedict", "return", "treedict" ]
Returns a dict representing the ontology tree Top level = {0:[top classes]} Multi inheritance is represented explicitly
[ "Returns", "a", "dict", "representing", "the", "ontology", "tree", "Top", "level", "=", "{", "0", ":", "[", "top", "classes", "]", "}", "Multi", "inheritance", "is", "represented", "explicitly" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/scene/widgets/grid.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/widgets/grid.py#L165-L193
def resize_widget(self, widget, row_span, col_span): """Resize a widget in the grid to new dimensions. Parameters ---------- widget : Widget The widget to resize row_span : int The number of rows to be occupied by this widget. col_span : int The number of columns to be occupied by this widget. """ row = None col = None for (r, c, rspan, cspan, w) in self._grid_widgets.values(): if w == widget: row = r col = c break if row is None or col is None: raise ValueError("%s not found in grid" % widget) self.remove_widget(widget) self.add_widget(widget, row, col, row_span, col_span) self._need_solver_recreate = True
[ "def", "resize_widget", "(", "self", ",", "widget", ",", "row_span", ",", "col_span", ")", ":", "row", "=", "None", "col", "=", "None", "for", "(", "r", ",", "c", ",", "rspan", ",", "cspan", ",", "w", ")", "in", "self", ".", "_grid_widgets", ".", "values", "(", ")", ":", "if", "w", "==", "widget", ":", "row", "=", "r", "col", "=", "c", "break", "if", "row", "is", "None", "or", "col", "is", "None", ":", "raise", "ValueError", "(", "\"%s not found in grid\"", "%", "widget", ")", "self", ".", "remove_widget", "(", "widget", ")", "self", ".", "add_widget", "(", "widget", ",", "row", ",", "col", ",", "row_span", ",", "col_span", ")", "self", ".", "_need_solver_recreate", "=", "True" ]
Resize a widget in the grid to new dimensions. Parameters ---------- widget : Widget The widget to resize row_span : int The number of rows to be occupied by this widget. col_span : int The number of columns to be occupied by this widget.
[ "Resize", "a", "widget", "in", "the", "grid", "to", "new", "dimensions", "." ]
python
train
secdev/scapy
scapy/contrib/diameter.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/diameter.py#L4811-L4816
def DiamReq(cmd, **fields): """Craft Diameter request commands""" upfields, name = getCmdParams(cmd, True, **fields) p = DiamG(**upfields) p.name = name return p
[ "def", "DiamReq", "(", "cmd", ",", "*", "*", "fields", ")", ":", "upfields", ",", "name", "=", "getCmdParams", "(", "cmd", ",", "True", ",", "*", "*", "fields", ")", "p", "=", "DiamG", "(", "*", "*", "upfields", ")", "p", ".", "name", "=", "name", "return", "p" ]
Craft Diameter request commands
[ "Craft", "Diameter", "request", "commands" ]
python
train
idlesign/django-sitegate
sitegate/flows_base.py
https://github.com/idlesign/django-sitegate/blob/0e58de91605071833d75a7c21f2d0de2f2e3c896/sitegate/flows_base.py#L57-L71
def update_request(self, request, form): """Updates Request object with flows forms.""" forms_key = '%s_forms' % self.flow_type # Use ordered forms dict in case _formNode wants to fetch the first defined. flow_dict = OrderedDict() try: flow_dict = request.sitegate[forms_key] except AttributeError: request.sitegate = {} except KeyError: pass flow_dict[self.get_flow_name()] = form request.sitegate[forms_key] = flow_dict
[ "def", "update_request", "(", "self", ",", "request", ",", "form", ")", ":", "forms_key", "=", "'%s_forms'", "%", "self", ".", "flow_type", "# Use ordered forms dict in case _formNode wants to fetch the first defined.", "flow_dict", "=", "OrderedDict", "(", ")", "try", ":", "flow_dict", "=", "request", ".", "sitegate", "[", "forms_key", "]", "except", "AttributeError", ":", "request", ".", "sitegate", "=", "{", "}", "except", "KeyError", ":", "pass", "flow_dict", "[", "self", ".", "get_flow_name", "(", ")", "]", "=", "form", "request", ".", "sitegate", "[", "forms_key", "]", "=", "flow_dict" ]
Updates Request object with flows forms.
[ "Updates", "Request", "object", "with", "flows", "forms", "." ]
python
train
mattjj/pylds
pylds/laplace.py
https://github.com/mattjj/pylds/blob/e946bfa5aa76e8f8284614561a0f40ffd5d868fb/pylds/laplace.py#L39-L51
def grad_local_log_likelihood(self, x): """ return d/dxt log p(yt | xt) evaluated at xt Optionally override this in base classes """ T, D = self.T, self.D_latent assert x.shape == (T, D) gfun = grad(self.local_log_likelihood) g = np.zeros((T, D)) for t in range(T): g[t] += gfun(x[t], self.data[t], self.inputs[t]) return g
[ "def", "grad_local_log_likelihood", "(", "self", ",", "x", ")", ":", "T", ",", "D", "=", "self", ".", "T", ",", "self", ".", "D_latent", "assert", "x", ".", "shape", "==", "(", "T", ",", "D", ")", "gfun", "=", "grad", "(", "self", ".", "local_log_likelihood", ")", "g", "=", "np", ".", "zeros", "(", "(", "T", ",", "D", ")", ")", "for", "t", "in", "range", "(", "T", ")", ":", "g", "[", "t", "]", "+=", "gfun", "(", "x", "[", "t", "]", ",", "self", ".", "data", "[", "t", "]", ",", "self", ".", "inputs", "[", "t", "]", ")", "return", "g" ]
return d/dxt log p(yt | xt) evaluated at xt Optionally override this in base classes
[ "return", "d", "/", "dxt", "log", "p", "(", "yt", "|", "xt", ")", "evaluated", "at", "xt", "Optionally", "override", "this", "in", "base", "classes" ]
python
train
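A minimal sketch of the per-timestep gradient pattern above, assuming the autograd package is installed and substituting a toy Gaussian log-likelihood for the model's own:

import autograd.numpy as np
from autograd import grad

def local_log_likelihood(x_t, y_t):
    # toy log p(y_t | x_t), up to an additive constant
    return -0.5 * np.sum((y_t - x_t) ** 2)

gfun = grad(local_log_likelihood)  # d/dx_t of the scalar above
T, D = 5, 3
x, y = np.zeros((T, D)), np.ones((T, D))
g = np.stack([gfun(x[t], y[t]) for t in range(T)])
print(g.shape)                     # (5, 3); each row equals y - x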
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L710-L781
def _MergeMessageField(self, tokenizer, message, field): """Merges a single message field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems. """ is_map_entry = _IsMapEntry(field) if tokenizer.TryConsume('<'): end_token = '>' else: tokenizer.Consume('{') end_token = '}' if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and tokenizer.TryConsume('[')): packed_type_name = self._ConsumeAnyTypeUrl(tokenizer) tokenizer.Consume(']') tokenizer.TryConsume(':') if tokenizer.TryConsume('<'): expanded_any_end_token = '>' else: tokenizer.Consume('{') expanded_any_end_token = '}' if not self.descriptor_pool: raise ParseError('Descriptor pool required to parse expanded Any field') expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name, self.descriptor_pool) if not expanded_any_sub_message: raise ParseError('Type %s not found in descriptor pool' % packed_type_name) while not tokenizer.TryConsume(expanded_any_end_token): if tokenizer.AtEnd(): raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (expanded_any_end_token,)) self._MergeField(tokenizer, expanded_any_sub_message) if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: any_message = getattr(message, field.name).add() else: any_message = getattr(message, field.name) any_message.Pack(expanded_any_sub_message) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if field.is_extension: sub_message = message.Extensions[field].add() elif is_map_entry: sub_message = getattr(message, field.name).GetEntryClass()() else: sub_message = getattr(message, field.name).add() else: if field.is_extension: sub_message = message.Extensions[field] else: sub_message = getattr(message, field.name) sub_message.SetInParent() while not tokenizer.TryConsume(end_token): if tokenizer.AtEnd(): raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) self._MergeField(tokenizer, sub_message) if is_map_entry: value_cpptype = field.message_type.fields_by_name['value'].cpp_type if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: value = getattr(message, field.name)[sub_message.key] value.MergeFrom(sub_message.value) else: getattr(message, field.name)[sub_message.key] = sub_message.value
[ "def", "_MergeMessageField", "(", "self", ",", "tokenizer", ",", "message", ",", "field", ")", ":", "is_map_entry", "=", "_IsMapEntry", "(", "field", ")", "if", "tokenizer", ".", "TryConsume", "(", "'<'", ")", ":", "end_token", "=", "'>'", "else", ":", "tokenizer", ".", "Consume", "(", "'{'", ")", "end_token", "=", "'}'", "if", "(", "field", ".", "message_type", ".", "full_name", "==", "_ANY_FULL_TYPE_NAME", "and", "tokenizer", ".", "TryConsume", "(", "'['", ")", ")", ":", "packed_type_name", "=", "self", ".", "_ConsumeAnyTypeUrl", "(", "tokenizer", ")", "tokenizer", ".", "Consume", "(", "']'", ")", "tokenizer", ".", "TryConsume", "(", "':'", ")", "if", "tokenizer", ".", "TryConsume", "(", "'<'", ")", ":", "expanded_any_end_token", "=", "'>'", "else", ":", "tokenizer", ".", "Consume", "(", "'{'", ")", "expanded_any_end_token", "=", "'}'", "if", "not", "self", ".", "descriptor_pool", ":", "raise", "ParseError", "(", "'Descriptor pool required to parse expanded Any field'", ")", "expanded_any_sub_message", "=", "_BuildMessageFromTypeName", "(", "packed_type_name", ",", "self", ".", "descriptor_pool", ")", "if", "not", "expanded_any_sub_message", ":", "raise", "ParseError", "(", "'Type %s not found in descriptor pool'", "%", "packed_type_name", ")", "while", "not", "tokenizer", ".", "TryConsume", "(", "expanded_any_end_token", ")", ":", "if", "tokenizer", ".", "AtEnd", "(", ")", ":", "raise", "tokenizer", ".", "ParseErrorPreviousToken", "(", "'Expected \"%s\".'", "%", "(", "expanded_any_end_token", ",", ")", ")", "self", ".", "_MergeField", "(", "tokenizer", ",", "expanded_any_sub_message", ")", "if", "field", ".", "label", "==", "descriptor", ".", "FieldDescriptor", ".", "LABEL_REPEATED", ":", "any_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "add", "(", ")", "else", ":", "any_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "any_message", ".", "Pack", "(", "expanded_any_sub_message", ")", "elif", "field", ".", "label", "==", "descriptor", ".", "FieldDescriptor", ".", "LABEL_REPEATED", ":", "if", "field", ".", "is_extension", ":", "sub_message", "=", "message", ".", "Extensions", "[", "field", "]", ".", "add", "(", ")", "elif", "is_map_entry", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "GetEntryClass", "(", ")", "(", ")", "else", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "add", "(", ")", "else", ":", "if", "field", ".", "is_extension", ":", "sub_message", "=", "message", ".", "Extensions", "[", "field", "]", "else", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "sub_message", ".", "SetInParent", "(", ")", "while", "not", "tokenizer", ".", "TryConsume", "(", "end_token", ")", ":", "if", "tokenizer", ".", "AtEnd", "(", ")", ":", "raise", "tokenizer", ".", "ParseErrorPreviousToken", "(", "'Expected \"%s\".'", "%", "(", "end_token", ",", ")", ")", "self", ".", "_MergeField", "(", "tokenizer", ",", "sub_message", ")", "if", "is_map_entry", ":", "value_cpptype", "=", "field", ".", "message_type", ".", "fields_by_name", "[", "'value'", "]", ".", "cpp_type", "if", "value_cpptype", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "value", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "[", "sub_message", ".", "key", "]", "value", ".", "MergeFrom", "(", "sub_message", ".", "value", ")", "else", ":", "getattr", "(", "message", ",", "field", ".", "name", ")", "[", 
"sub_message", ".", "key", "]", "=", "sub_message", ".", "value" ]
Merges a single message field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems.
[ "Merges", "a", "single", "scalar", "field", "into", "a", "message", "." ]
python
train
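Usage sketch: the merge path above is reached through the public text_format.Merge API. A hedged example, assuming a compiled addressbook_pb2 module (the standard protobuf tutorial schema); any message with a message-typed field behaves the same way:

```python
# addressbook_pb2 is an assumption: the protobuf tutorial schema compiled
# with protoc. Person.phones is a repeated message field, so merging it
# goes through _MergeMessageField.
from google.protobuf import text_format
import addressbook_pb2

person = addressbook_pb2.Person()
# Message fields accept either {...} or <...> delimiters, as handled above.
text_format.Merge('name: "Ada" phones { number: "555-0100" }', person)
print(person.phones[0].number)  # -> 555-0100
```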
chrlie/shorten
shorten/base.py
https://github.com/chrlie/shorten/blob/fb762a199979aefaa28c88fa035e88ea8ce4d639/shorten/base.py#L149-L161
def next_formatted_pair(self): """\ Returns a :class:`FormattedPair <shorten.store.FormattedPair>` containing attributes `key`, `token`, `formatted_key` and `formatted_token`. Calling this method will always consume a key and token. """ key = self.key_gen.next() token = self.token_gen.create_token(key) fkey = self.formatter.format_key(key) ftoken = self.formatter.format_token(token) return FormattedPair(key, token, fkey, ftoken)
[ "def", "next_formatted_pair", "(", "self", ")", ":", "key", "=", "self", ".", "key_gen", ".", "next", "(", ")", "token", "=", "self", ".", "token_gen", ".", "create_token", "(", "key", ")", "fkey", "=", "self", ".", "formatter", ".", "format_key", "(", "key", ")", "ftoken", "=", "self", ".", "formatter", ".", "format_token", "(", "token", ")", "return", "FormattedPair", "(", "key", ",", "token", ",", "fkey", ",", "ftoken", ")" ]
\ Returns a :class:`FormattedPair <shorten.store.FormattedPair>` containing attributes `key`, `token`, `formatted_key` and `formatted_token`. Calling this method will always consume a key and token.
[ "\\", "Returns", "a", ":", "class", ":", "FormattedPair", "<shorten", ".", "store", ".", "FormattedPair", ">", "containing", "attributes", "key", "token", "formatted_key", "and", "formatted_token", ".", "Calling", "this", "method", "will", "always", "consume", "a", "key", "and", "token", "." ]
python
train
vtkiorg/vtki
vtki/utilities.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L44-L50
def cell_scalar(mesh, name): """ Returns cell scalars of a vtk object """ vtkarr = mesh.GetCellData().GetArray(name) if vtkarr: if isinstance(vtkarr, vtk.vtkBitArray): vtkarr = vtk_bit_array_to_char(vtkarr) return vtk_to_numpy(vtkarr)
[ "def", "cell_scalar", "(", "mesh", ",", "name", ")", ":", "vtkarr", "=", "mesh", ".", "GetCellData", "(", ")", ".", "GetArray", "(", "name", ")", "if", "vtkarr", ":", "if", "isinstance", "(", "vtkarr", ",", "vtk", ".", "vtkBitArray", ")", ":", "vtkarr", "=", "vtk_bit_array_to_char", "(", "vtkarr", ")", "return", "vtk_to_numpy", "(", "vtkarr", ")" ]
Returns cell scalars of a vtk object
[ "Returns", "cell", "scalars", "of", "a", "vtk", "object" ]
python
train
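A hedged usage sketch for cell_scalar: attach a named cell-data array to an example mesh, then read it back as a numpy array. The dict-style cell_arrays assignment is an assumption based on vtki's PyVista lineage; attribute names may differ between releases.

```python
import numpy as np
import vtki  # assumption: vtki installed with VTK available
from vtki.utilities import cell_scalar

mesh = vtki.Sphere()
# Assumed dict-style accessor for cell data; one scalar per cell.
mesh.cell_arrays['values'] = np.arange(mesh.n_cells)
print(cell_scalar(mesh, 'values'))  # numpy array of length n_cells
```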
mitsei/dlkit
dlkit/records/assessment/qti/inline_choice_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/qti/inline_choice_records.py#L926-L967
def _init_metadata(self): """stub""" self._inline_regions_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'inline_regions'), 'element_label': 'set of inline regions', 'instructions': 'submit correct choice for answer for each region', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_object_values': [{}], 'syntax': 'OBJECT', } self._choice_ids_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'choice_ids'), 'element_label': 'response set with inline regions', 'instructions': 'submit correct choice for answer for each region', 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_object_values': [[]], 'syntax': 'OBJECT', } self._choice_id_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'choice_id'), 'element_label': 'response set', 'instructions': 'submit correct choice for answer', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [] }
[ "def", "_init_metadata", "(", "self", ")", ":", "self", ".", "_inline_regions_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'inline_regions'", ")", ",", "'element_label'", ":", "'set of inline regions'", ",", "'instructions'", ":", "'submit correct choice for answer for each region'", ",", "'required'", ":", "True", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_object_values'", ":", "[", "{", "}", "]", ",", "'syntax'", ":", "'OBJECT'", ",", "}", "self", ".", "_choice_ids_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'choice_ids'", ")", ",", "'element_label'", ":", "'response set with inline regions'", ",", "'instructions'", ":", "'submit correct choice for answer for each region'", ",", "'required'", ":", "False", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_object_values'", ":", "[", "[", "]", "]", ",", "'syntax'", ":", "'OBJECT'", ",", "}", "self", ".", "_choice_id_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'choice_id'", ")", ",", "'element_label'", ":", "'response set'", ",", "'instructions'", ":", "'submit correct choice for answer'", ",", "'required'", ":", "True", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_id_values'", ":", "[", "''", "]", ",", "'syntax'", ":", "'ID'", ",", "'id_set'", ":", "[", "]", "}" ]
stub
[ "stub" ]
python
train
DeepHorizons/iarm
iarm/arm_instructions/memory.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/memory.py#L260-L279
def LDRSB(self, params): """ LDRSB Ra, [Rb, Rc] Load a byte from memory, sign extend, and put into Ra Ra, Rb, and Rc must be low registers """ # TODO LDRSB can't use immediates Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params) self.check_arguments(low_registers=(Ra, Rb, Rc)) def LDRSB_func(): # TODO does memory read up? self.register[Ra] = 0 self.register[Ra] |= self.memory[self.register[Rb] + self.register[Rc]] if self.register[Ra] & (1 << 7): self.register[Ra] |= (0xFFFFFF << 8) return LDRSB_func
[ "def", "LDRSB", "(", "self", ",", "params", ")", ":", "# TODO LDRSB cant use immediates", "Ra", ",", "Rb", ",", "Rc", "=", "self", ".", "get_three_parameters", "(", "self", ".", "THREE_PARAMETER_WITH_BRACKETS", ",", "params", ")", "self", ".", "check_arguments", "(", "low_registers", "=", "(", "Ra", ",", "Rb", ",", "Rc", ")", ")", "def", "LDRSB_func", "(", ")", ":", "# TODO does memory read up?", "self", ".", "register", "[", "Ra", "]", "=", "0", "self", ".", "register", "[", "Ra", "]", "|=", "self", ".", "memory", "[", "self", ".", "register", "[", "Rb", "]", "+", "self", ".", "register", "[", "Rc", "]", "]", "if", "self", ".", "register", "[", "Ra", "]", "&", "(", "1", "<<", "7", ")", ":", "self", ".", "register", "[", "Ra", "]", "|=", "(", "0xFFFFFF", "<<", "8", ")", "return", "LDRSB_func" ]
LDRSB Ra, [Rb, Rc] Load a byte from memory, sign extend, and put into Ra Ra, Rb, and Rc must be low registers
[ "LDRSB", "Ra", "[", "Rb", "Rc", "]" ]
python
train
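The sign extension inside LDRSB_func is easy to verify in isolation: bit 7 of the loaded byte is replicated into bits 8-31. A minimal standalone sketch:

```python
def sign_extend_byte(byte_value):
    """Mirror LDRSB_func: keep the low byte, propagate bit 7 upward."""
    value = byte_value & 0xFF
    if value & (1 << 7):         # sign bit set: negative in two's complement
        value |= 0xFFFFFF << 8   # fill bits 8..31 with ones
    return value

assert sign_extend_byte(0x7F) == 0x0000007F  # +127 stays positive
assert sign_extend_byte(0x80) == 0xFFFFFF80  # -128 as a 32-bit pattern
```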
kejbaly2/metrique
metrique/result.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/result.py#L395-L427
def last_chain(self): ''' Leaves only the last chain for each object. Chain is a series of consecutive versions where `_end` of one is `_start` of another. ''' cols = self.columns.tolist() i_oid = cols.index('_oid') i_start = cols.index('_start') i_end = cols.index('_end') start_map = {} end_map = {} for row in self.values: oid = row[i_oid] if oid not in start_map: start_map[oid] = set() end_map[oid] = set() start_map[oid].add(row[i_start]) end_map[oid].add(row[i_end]) cutoffs = {} for oid in start_map: maxend = pd.NaT if pd.NaT in end_map[oid] else max(end_map[oid]) ends = end_map[oid] - start_map[oid] - set([maxend]) cutoffs[oid] = None if len(ends) == 0 else max(ends) vals = [row for row in self.values if cutoffs[row[i_oid]] is None or cutoffs[row[i_oid]] < row[i_start]] return pd.DataFrame(vals, columns=cols)
[ "def", "last_chain", "(", "self", ")", ":", "cols", "=", "self", ".", "columns", ".", "tolist", "(", ")", "i_oid", "=", "cols", ".", "index", "(", "'_oid'", ")", "i_start", "=", "cols", ".", "index", "(", "'_start'", ")", "i_end", "=", "cols", ".", "index", "(", "'_end'", ")", "start_map", "=", "{", "}", "end_map", "=", "{", "}", "for", "row", "in", "self", ".", "values", ":", "oid", "=", "row", "[", "i_oid", "]", "if", "oid", "not", "in", "start_map", ":", "start_map", "[", "oid", "]", "=", "set", "(", ")", "end_map", "[", "oid", "]", "=", "set", "(", ")", "start_map", "[", "oid", "]", ".", "add", "(", "row", "[", "i_start", "]", ")", "end_map", "[", "oid", "]", ".", "add", "(", "row", "[", "i_end", "]", ")", "cutoffs", "=", "{", "}", "for", "oid", "in", "start_map", ":", "maxend", "=", "pd", ".", "NaT", "if", "pd", ".", "NaT", "in", "end_map", "[", "oid", "]", "else", "max", "(", "end_map", "[", "oid", "]", ")", "ends", "=", "end_map", "[", "oid", "]", "-", "start_map", "[", "oid", "]", "-", "set", "(", "[", "maxend", "]", ")", "cutoffs", "[", "oid", "]", "=", "None", "if", "len", "(", "ends", ")", "==", "0", "else", "max", "(", "ends", ")", "vals", "=", "[", "row", "for", "row", "in", "self", ".", "values", "if", "cutoffs", "[", "row", "[", "i_oid", "]", "]", "is", "None", "or", "cutoffs", "[", "row", "[", "i_oid", "]", "]", "<", "row", "[", "i_start", "]", "]", "return", "pd", ".", "DataFrame", "(", "vals", ",", "columns", "=", "cols", ")" ]
Leaves only the last chain for each object. Chain is a series of consecutive versions where `_end` of one is `_start` of another.
[ "Leaves", "only", "the", "last", "chain", "for", "each", "object", "." ]
python
train
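The cutoff rule in last_chain can be demonstrated standalone: an _end that is nobody's _start and not the final end marks a broken chain, and only rows starting after the latest break survive. A minimal sketch for a single _oid:

```python
import pandas as pd

# Three versions of one object; the chain breaks between _end=2 and _start=3.
df = pd.DataFrame([[1, 1, 2], [1, 3, 4], [1, 4, pd.NaT]],
                  columns=['_oid', '_start', '_end'])
starts, ends = set(df['_start']), set(df['_end'])
maxend = pd.NaT if pd.NaT in ends else max(ends)
broken = ends - starts - {maxend}              # {2}: a dangling _end
cutoff = max(broken) if broken else None
last = df if cutoff is None else df[df['_start'] > cutoff]
print(last)  # keeps only the last chain: [3, 4] and [4, NaT]
```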
nesaro/pydsl
pydsl/extract.py
https://github.com/nesaro/pydsl/blob/00b4fffd72036b80335e1a44a888fac57917ab41/pydsl/extract.py#L44-L74
def extract_alphabet(alphabet, inputdata, fixed_start = False): """ Receives a sequence and an alphabet, returns a list of PositionTokens with all of the parts of the sequence that are a subset of the alphabet """ if not inputdata: return [] base_alphabet = alphabet.alphabet lexer = lexer_factory(alphabet, base_alphabet) totallen = len(inputdata) maxl = totallen minl = 1 if fixed_start: max_start = 1 else: max_start = totallen result = [] for i in range(max_start): for j in range(i+minl, min(i+maxl, totallen) + 1): try: lexed = lexer(inputdata[i:j]) if lexed and len(lexed) == 1: result.append((i,j, inputdata[i:j], lexed[0].gd)) elif lexed: raise Exception except: continue result = filter_subsets(result) return [PositionToken(content, gd, left, right) for (left, right, content, gd) in result]
[ "def", "extract_alphabet", "(", "alphabet", ",", "inputdata", ",", "fixed_start", "=", "False", ")", ":", "if", "not", "inputdata", ":", "return", "[", "]", "base_alphabet", "=", "alphabet", ".", "alphabet", "lexer", "=", "lexer_factory", "(", "alphabet", ",", "base_alphabet", ")", "totallen", "=", "len", "(", "inputdata", ")", "maxl", "=", "totallen", "minl", "=", "1", "if", "fixed_start", ":", "max_start", "=", "1", "else", ":", "max_start", "=", "totallen", "result", "=", "[", "]", "for", "i", "in", "range", "(", "max_start", ")", ":", "for", "j", "in", "range", "(", "i", "+", "minl", ",", "min", "(", "i", "+", "maxl", ",", "totallen", ")", "+", "1", ")", ":", "try", ":", "lexed", "=", "lexer", "(", "inputdata", "[", "i", ":", "j", "]", ")", "if", "lexed", "and", "len", "(", "lexed", ")", "==", "1", ":", "result", ".", "append", "(", "(", "i", ",", "j", ",", "inputdata", "[", "i", ":", "j", "]", ",", "lexed", "[", "0", "]", ".", "gd", ")", ")", "elif", "lexed", ":", "raise", "Exception", "except", ":", "continue", "result", "=", "filter_subsets", "(", "result", ")", "return", "[", "PositionToken", "(", "content", ",", "gd", ",", "left", ",", "right", ")", "for", "(", "left", ",", "right", ",", "content", ",", "gd", ")", "in", "result", "]" ]
Receives a sequence and an alphabet, returns a list of PositionTokens with all of the parts of the sequence that are a subset of the alphabet
[ "Receives", "a", "sequence", "and", "an", "alphabet", "returns", "a", "list", "of", "PositionTokens", "with", "all", "of", "the", "parts", "of", "the", "sequence", "that", "are", "a", "subset", "of", "the", "alphabet" ]
python
train
Miserlou/Zappa
zappa/cli.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L1068-L1100
def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False, force_colorize=False): """ Tail this function's logs. if keep_open, do so repeatedly, printing any new logs """ try: since_stamp = string_to_timestamp(since) last_since = since_stamp while True: new_logs = self.zappa.fetch_logs( self.lambda_name, start_time=since_stamp, limit=limit, filter_pattern=filter_pattern, ) new_logs = [ e for e in new_logs if e['timestamp'] > last_since ] self.print_logs(new_logs, colorize, http, non_http, force_colorize) if not keep_open: break if new_logs: last_since = new_logs[-1]['timestamp'] time.sleep(1) except KeyboardInterrupt: # pragma: no cover # Die gracefully try: sys.exit(0) except SystemExit: os._exit(130)
[ "def", "tail", "(", "self", ",", "since", ",", "filter_pattern", ",", "limit", "=", "10000", ",", "keep_open", "=", "True", ",", "colorize", "=", "True", ",", "http", "=", "False", ",", "non_http", "=", "False", ",", "force_colorize", "=", "False", ")", ":", "try", ":", "since_stamp", "=", "string_to_timestamp", "(", "since", ")", "last_since", "=", "since_stamp", "while", "True", ":", "new_logs", "=", "self", ".", "zappa", ".", "fetch_logs", "(", "self", ".", "lambda_name", ",", "start_time", "=", "since_stamp", ",", "limit", "=", "limit", ",", "filter_pattern", "=", "filter_pattern", ",", ")", "new_logs", "=", "[", "e", "for", "e", "in", "new_logs", "if", "e", "[", "'timestamp'", "]", ">", "last_since", "]", "self", ".", "print_logs", "(", "new_logs", ",", "colorize", ",", "http", ",", "non_http", ",", "force_colorize", ")", "if", "not", "keep_open", ":", "break", "if", "new_logs", ":", "last_since", "=", "new_logs", "[", "-", "1", "]", "[", "'timestamp'", "]", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "# pragma: no cover", "# Die gracefully", "try", ":", "sys", ".", "exit", "(", "0", ")", "except", "SystemExit", ":", "os", ".", "_exit", "(", "130", ")" ]
Tail this function's logs. if keep_open, do so repeatedly, printing any new logs
[ "Tail", "this", "function", "s", "logs", "." ]
python
train
saltstack/salt
salt/sdb/cache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/cache.py#L66-L73
def set_(key, value, service=None, profile=None): # pylint: disable=W0613 ''' Set a key/value pair in the cache service ''' key, profile = _parse_key(key, profile) cache = salt.cache.Cache(__opts__) cache.store(profile['bank'], key, value) return get(key, service, profile)
[ "def", "set_", "(", "key", ",", "value", ",", "service", "=", "None", ",", "profile", "=", "None", ")", ":", "# pylint: disable=W0613", "key", ",", "profile", "=", "_parse_key", "(", "key", ",", "profile", ")", "cache", "=", "salt", ".", "cache", ".", "Cache", "(", "__opts__", ")", "cache", ".", "store", "(", "profile", "[", "'bank'", "]", ",", "key", ",", "value", ")", "return", "get", "(", "key", ",", "service", ",", "profile", ")" ]
Set a key/value pair in the cache service
[ "Set", "a", "key", "/", "value", "pair", "in", "the", "cache", "service" ]
python
train
sprockets/sprockets.mixins.amqp
sprockets/mixins/amqp/__init__.py
https://github.com/sprockets/sprockets.mixins.amqp/blob/de22b85aec1315bc01e47774637098c34525692b/sprockets/mixins/amqp/__init__.py#L390-L401
def close(self): """Cleanly shutdown the connection to RabbitMQ :raises: sprockets.mixins.amqp.ConnectionStateError """ if not self.closable: LOGGER.warning('Closed called while %s', self.state_description) raise ConnectionStateError(self.state_description) self.state = self.STATE_CLOSING LOGGER.info('Closing RabbitMQ connection') self.connection.close()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "closable", ":", "LOGGER", ".", "warning", "(", "'Closed called while %s'", ",", "self", ".", "state_description", ")", "raise", "ConnectionStateError", "(", "self", ".", "state_description", ")", "self", ".", "state", "=", "self", ".", "STATE_CLOSING", "LOGGER", ".", "info", "(", "'Closing RabbitMQ connection'", ")", "self", ".", "connection", ".", "close", "(", ")" ]
Cleanly shutdown the connection to RabbitMQ :raises: sprockets.mixins.amqp.ConnectionStateError
[ "Cleanly", "shutdown", "the", "connection", "to", "RabbitMQ" ]
python
train
dw/mitogen
ansible_mitogen/runner.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/runner.py#L231-L241
def reopen_readonly(fp): """ Replace the file descriptor belonging to the file object `fp` with one open on the same file (`fp.name`), but opened with :py:data:`os.O_RDONLY`. This enables temporary files to be executed on Linux, which usually throws ``ETXTBUSY`` if any writeable handle exists pointing to a file passed to `execve()`. """ fd = os.open(fp.name, os.O_RDONLY) os.dup2(fd, fp.fileno()) os.close(fd)
[ "def", "reopen_readonly", "(", "fp", ")", ":", "fd", "=", "os", ".", "open", "(", "fp", ".", "name", ",", "os", ".", "O_RDONLY", ")", "os", ".", "dup2", "(", "fd", ",", "fp", ".", "fileno", "(", ")", ")", "os", ".", "close", "(", "fd", ")" ]
Replace the file descriptor belonging to the file object `fp` with one open on the same file (`fp.name`), but opened with :py:data:`os.O_RDONLY`. This enables temporary files to be executed on Linux, which usually throws ``ETXTBUSY`` if any writeable handle exists pointing to a file passed to `execve()`.
[ "Replace", "the", "file", "descriptor", "belonging", "to", "the", "file", "object", "fp", "with", "one", "open", "on", "the", "same", "file", "(", "fp", ".", "name", ")", "but", "opened", "with", ":", "py", ":", "data", ":", "os", ".", "O_RDONLY", ".", "This", "enables", "temporary", "files", "to", "be", "executed", "on", "Linux", "which", "usually", "throws", "ETXTBUSY", "if", "any", "writeable", "handle", "exists", "pointing", "to", "a", "file", "passed", "to", "execve", "()", "." ]
python
train
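A runnable sketch of the ETXTBSY situation this solves on Linux: keep a writable temp-file object open, swap its descriptor for a read-only one in place, then execute the file while the Python object is still alive.

```python
import os
import stat
import subprocess
import tempfile

fp = tempfile.NamedTemporaryFile(mode='w', suffix='.sh', delete=False)
fp.write('#!/bin/sh\necho hello\n')
fp.flush()
os.chmod(fp.name, stat.S_IRWXU)

# Equivalent of reopen_readonly(fp): replace the writable descriptor.
fd = os.open(fp.name, os.O_RDONLY)
os.dup2(fd, fp.fileno())
os.close(fd)

# No writable handle remains, so execve() no longer raises ETXTBSY here.
print(subprocess.check_output([fp.name]))  # b'hello\n'
fp.close()
os.unlink(fp.name)
```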
buildbot/buildbot
master/buildbot/process/users/users.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/process/users/users.py#L155-L169
def encrypt(passwd): """ Encrypts the incoming password after adding some salt to store it in the database. @param passwd: password portion of user credentials @type passwd: string @returns: encrypted/salted string """ m = sha1() salt = hexlify(os.urandom(salt_len)) m.update(unicode2bytes(passwd) + salt) crypted = bytes2unicode(salt) + m.hexdigest() return crypted
[ "def", "encrypt", "(", "passwd", ")", ":", "m", "=", "sha1", "(", ")", "salt", "=", "hexlify", "(", "os", ".", "urandom", "(", "salt_len", ")", ")", "m", ".", "update", "(", "unicode2bytes", "(", "passwd", ")", "+", "salt", ")", "crypted", "=", "bytes2unicode", "(", "salt", ")", "+", "m", ".", "hexdigest", "(", ")", "return", "crypted" ]
Encrypts the incoming password after adding some salt to store it in the database. @param passwd: password portion of user credentials @type passwd: string @returns: encrypted/salted string
[ "Encrypts", "the", "incoming", "password", "after", "adding", "some", "salt", "to", "store", "it", "in", "the", "database", "." ]
python
train
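Because the salt is stored as the prefix of the result, verifying a password is just re-hashing with that prefix. A standalone sketch of the round trip; salt_len = 8 is an assumption standing in for the module-level constant:

```python
import os
from binascii import hexlify
from hashlib import sha1

salt_len = 8  # assumption: stands in for buildbot's module constant

def encrypt(passwd):
    salt = hexlify(os.urandom(salt_len))  # 2 * salt_len hex characters
    return salt.decode('ascii') + sha1(passwd.encode('utf-8') + salt).hexdigest()

def verify(passwd, crypted):
    salt = crypted[:salt_len * 2].encode('ascii')  # recover the stored salt
    return crypted == salt.decode('ascii') + sha1(passwd.encode('utf-8') + salt).hexdigest()

token = encrypt('hunter2')
assert verify('hunter2', token) and not verify('wrong', token)
```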
odlgroup/odl
odl/phantom/misc_phantoms.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/misc_phantoms.py#L19-L45
def submarine(space, smooth=True, taper=20.0): """Return a 'submarine' phantom consisting in an ellipsoid and a box. Parameters ---------- space : `DiscreteLp` Discretized space in which the phantom is supposed to be created. smooth : bool, optional If ``True``, the boundaries are smoothed out. Otherwise, the function steps from 0 to 1 at the boundaries. taper : float, optional Tapering parameter for the boundary smoothing. Larger values mean faster taper, i.e. sharper boundaries. Returns ------- phantom : ``space`` element The submarine phantom in ``space``. """ if space.ndim == 2: if smooth: return _submarine_2d_smooth(space, taper) else: return _submarine_2d_nonsmooth(space) else: raise ValueError('phantom only defined in 2 dimensions, got {}' ''.format(space.ndim))
[ "def", "submarine", "(", "space", ",", "smooth", "=", "True", ",", "taper", "=", "20.0", ")", ":", "if", "space", ".", "ndim", "==", "2", ":", "if", "smooth", ":", "return", "_submarine_2d_smooth", "(", "space", ",", "taper", ")", "else", ":", "return", "_submarine_2d_nonsmooth", "(", "space", ")", "else", ":", "raise", "ValueError", "(", "'phantom only defined in 2 dimensions, got {}'", "''", ".", "format", "(", "space", ".", "ndim", ")", ")" ]
Return a 'submarine' phantom consisting in an ellipsoid and a box. Parameters ---------- space : `DiscreteLp` Discretized space in which the phantom is supposed to be created. smooth : bool, optional If ``True``, the boundaries are smoothed out. Otherwise, the function steps from 0 to 1 at the boundaries. taper : float, optional Tapering parameter for the boundary smoothing. Larger values mean faster taper, i.e. sharper boundaries. Returns ------- phantom : ``space`` element The submarine phantom in ``space``.
[ "Return", "a", "submarine", "phantom", "consisting", "in", "an", "ellipsoid", "and", "a", "box", "." ]
python
train
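A hedged usage sketch, assuming odl is installed: build a 2-D uniform discretization and evaluate the phantom on it.

```python
import odl

space = odl.uniform_discr(min_pt=[-1, -1], max_pt=[1, 1], shape=(100, 100))
phantom = odl.phantom.submarine(space, smooth=True, taper=20.0)
phantom.show('submarine phantom')  # optional matplotlib display
```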
saltstack/salt
salt/modules/keystone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/keystone.py#L751-L781
def project_get(project_id=None, name=None, profile=None, **connection_args): ''' Return a specific project (keystone project-get) Overrides keystone tenant-get from api V2. For keystone api V3 only. .. versionadded:: 2016.11.0 project_id The project id. name The project name. profile Configuration profile - if configuration for multiple openstack accounts required. CLI Examples: .. code-block:: bash salt '*' keystone.project_get c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.project_get project_id=c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.project_get name=nova ''' auth(profile, **connection_args) if _OS_IDENTITY_API_VERSION > 2: return tenant_get(tenant_id=project_id, name=name, profile=profile, **connection_args) else: return False
[ "def", "project_get", "(", "project_id", "=", "None", ",", "name", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", ":", "auth", "(", "profile", ",", "*", "*", "connection_args", ")", "if", "_OS_IDENTITY_API_VERSION", ">", "2", ":", "return", "tenant_get", "(", "tenant_id", "=", "project_id", ",", "name", "=", "name", ",", "profile", "=", "None", ",", "*", "*", "connection_args", ")", "else", ":", "return", "False" ]
Return a specific project (keystone project-get) Overrides keystone tenant-get from api V2. For keystone api V3 only. .. versionadded:: 2016.11.0 project_id The project id. name The project name. profile Configuration profile - if configuration for multiple openstack accounts required. CLI Examples: .. code-block:: bash salt '*' keystone.project_get c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.project_get project_id=c965f79c4f864eaaa9c3b41904e67082 salt '*' keystone.project_get name=nova
[ "Return", "a", "specific", "projects", "(", "keystone", "project", "-", "get", ")", "Overrides", "keystone", "tenant", "-", "get", "form", "api", "V2", ".", "For", "keystone", "api", "V3", "only", "." ]
python
train
mattja/nsim
nsim/analyses1/freq.py
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L213-L241
def cwt(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True): """Continuous wavelet transform Note the full results can use a huge amount of memory at 64-bit precision Args: ts: Timeseries of m variables, shape (n, m). Assumed constant timestep. freqs: list of frequencies (in Hz) to use for the transform. (default is 50 frequency bins logarithmic from 1Hz to 100Hz) wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets plot: whether to plot time-resolved power spectrum Returns: coefs: Continuous wavelet transform output array, shape (n,len(freqs),m) """ orig_ndim = ts.ndim if ts.ndim == 1: ts = ts[:, np.newaxis] channels = ts.shape[1] fs = (len(ts) - 1.0) / (1.0*ts.tspan[-1] - ts.tspan[0]) x = signal.detrend(ts, axis=0) dtype = wavelet(fs/freqs[0], fs/freqs[0]).dtype coefs = np.zeros((len(ts), len(freqs), channels), dtype) for i in range(channels): coefs[:, :, i] = roughcwt(x[:, i], wavelet, fs/freqs).T if plot: _plot_cwt(ts, coefs, freqs) if orig_ndim == 1: coefs = coefs[:, :, 0] return coefs
[ "def", "cwt", "(", "ts", ",", "freqs", "=", "np", ".", "logspace", "(", "0", ",", "2", ")", ",", "wavelet", "=", "cwtmorlet", ",", "plot", "=", "True", ")", ":", "orig_ndim", "=", "ts", ".", "ndim", "if", "ts", ".", "ndim", "is", "1", ":", "ts", "=", "ts", "[", ":", ",", "np", ".", "newaxis", "]", "channels", "=", "ts", ".", "shape", "[", "1", "]", "fs", "=", "(", "len", "(", "ts", ")", "-", "1.0", ")", "/", "(", "1.0", "*", "ts", ".", "tspan", "[", "-", "1", "]", "-", "ts", ".", "tspan", "[", "0", "]", ")", "x", "=", "signal", ".", "detrend", "(", "ts", ",", "axis", "=", "0", ")", "dtype", "=", "wavelet", "(", "fs", "/", "freqs", "[", "0", "]", ",", "fs", "/", "freqs", "[", "0", "]", ")", ".", "dtype", "coefs", "=", "np", ".", "zeros", "(", "(", "len", "(", "ts", ")", ",", "len", "(", "freqs", ")", ",", "channels", ")", ",", "dtype", ")", "for", "i", "in", "range", "(", "channels", ")", ":", "coefs", "[", ":", ",", ":", ",", "i", "]", "=", "roughcwt", "(", "x", "[", ":", ",", "i", "]", ",", "cwtmorlet", ",", "fs", "/", "freqs", ")", ".", "T", "if", "plot", ":", "_plot_cwt", "(", "ts", ",", "coefs", ",", "freqs", ")", "if", "orig_ndim", "is", "1", ":", "coefs", "=", "coefs", "[", ":", ",", ":", ",", "0", "]", "return", "coefs" ]
Continuous wavelet transform Note the full results can use a huge amount of memory at 64-bit precision Args: ts: Timeseries of m variables, shape (n, m). Assumed constant timestep. freqs: list of frequencies (in Hz) to use for the transform. (default is 50 frequency bins logarithmic from 1Hz to 100Hz) wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets plot: whether to plot time-resolved power spectrum Returns: coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
[ "Continuous", "wavelet", "transform", "Note", "the", "full", "results", "can", "use", "a", "huge", "amount", "of", "memory", "at", "64", "-", "bit", "precision" ]
python
train
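A hedged usage sketch: the Timeseries wrapper (providing the tspan attribute read above) is assumed from the nsim package. Fifty log-spaced frequency bins over a noisy 8 Hz sine:

```python
import numpy as np
import nsim  # assumption: exposes the Timeseries class with a tspan attribute

t = np.linspace(0, 10, 2000)
x = np.sin(2 * np.pi * 8 * t) + 0.3 * np.random.randn(len(t))
ts = nsim.Timeseries(x, tspan=t)
coefs = ts.cwt(freqs=np.logspace(0, 2), plot=False)  # shape (2000, 50)
```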
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L777-L790
def list(self, **params): """ Retrieve all sources Returns all lead sources available to the user according to the parameters provided :calls: ``get /lead_sources`` :param dict params: (optional) Search options. :return: List of dictionaries that support attribute-style access, which represent a collection of LeadSources. :rtype: list """ _, _, lead_sources = self.http_client.get("/lead_sources", params=params) return lead_sources
[ "def", "list", "(", "self", ",", "*", "*", "params", ")", ":", "_", ",", "_", ",", "lead_sources", "=", "self", ".", "http_client", ".", "get", "(", "\"/lead_sources\"", ",", "params", "=", "params", ")", "return", "lead_sources" ]
Retrieve all sources Returns all lead sources available to the user according to the parameters provided :calls: ``get /lead_sources`` :param dict params: (optional) Search options. :return: List of dictionaries that support attribute-style access, which represent a collection of LeadSources. :rtype: list
[ "Retrieve", "all", "sources" ]
python
train
polyaxon/polyaxon
polyaxon/libs/statics.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/libs/statics.py#L4-L17
def get_asset_url(module: str, path: str) -> str: """Return a static asset URL (located within Polyaxon's static files). Example: ```python >>> get_asset_url('polyaxon', 'dist/global.css') ... "/_static/74d127b78dc7daf2c51f/polyaxon/dist/global.css" ``` """ return '{}/{}/{}'.format( settings.STATIC_URL.rstrip('/'), module, path.lstrip('/'), )
[ "def", "get_asset_url", "(", "module", ":", "str", ",", "path", ":", "str", ")", "->", "str", ":", "return", "'{}/{}/{}'", ".", "format", "(", "settings", ".", "STATIC_URL", ".", "rstrip", "(", "'/'", ")", ",", "module", ",", "path", ".", "lstrip", "(", "'/'", ")", ",", ")" ]
Return a static asset URL (located within Polyaxon's static files). Example: ```python >>> get_asset_url('polyaxon', 'dist/global.css') ... "/_static/74d127b78dc7daf2c51f/polyaxon/dist/global.css" ```
[ "Return", "a", "static", "asset", "URL", "(", "located", "within", "Polyaxon", "s", "static", "files", ")", "." ]
python
train
frawau/aiolifx
aiolifx/aiolifx.py
https://github.com/frawau/aiolifx/blob/9bd8c5e6d291f4c79314989402f7e2c6476d5851/aiolifx/aiolifx.py#L796-L831
def set_power(self, value,callb=None,duration=0,rapid=False): """Convenience method to set the power status of the device This method will send a SetPower message to the device, and request callb be executed when an ACK is received. The default callback will simply cache the value. :param value: The new state :type value: str/bool/int :param duration: The duration, in seconds, of the power state transition. :type duration: int :param callb: Callable to be used when the response is received. If not set, self.resp_set_label will be used. :type callb: callable :param rapid: Whether to ask for ack (False) or not (True). Default False :type rapid: bool :returns: None :rtype: None """ on = [True, 1, "on"] off = [False, 0, "off"] if value in on: myvalue = 65535 else: myvalue = 0 mypartial=partial(self.resp_set_lightpower,power_level=myvalue) if callb: mycallb=lambda x,y:(mypartial(y),callb(x,y)) else: mycallb=lambda x,y:mypartial(y) if not rapid: response = self.req_with_ack(LightSetPower, {"power_level": myvalue, "duration": duration},callb=mycallb) else: response = self.fire_and_forget(LightSetPower, {"power_level": myvalue, "duration": duration}, num_repeats=1) self.power_level=myvalue if callb: callb(self,None)
[ "def", "set_power", "(", "self", ",", "value", ",", "callb", "=", "None", ",", "duration", "=", "0", ",", "rapid", "=", "False", ")", ":", "on", "=", "[", "True", ",", "1", ",", "\"on\"", "]", "off", "=", "[", "False", ",", "0", ",", "\"off\"", "]", "if", "value", "in", "on", ":", "myvalue", "=", "65535", "else", ":", "myvalue", "=", "0", "mypartial", "=", "partial", "(", "self", ".", "resp_set_lightpower", ",", "power_level", "=", "myvalue", ")", "if", "callb", ":", "mycallb", "=", "lambda", "x", ",", "y", ":", "(", "mypartial", "(", "y", ")", ",", "callb", "(", "x", ",", "y", ")", ")", "else", ":", "mycallb", "=", "lambda", "x", ",", "y", ":", "mypartial", "(", "y", ")", "if", "not", "rapid", ":", "response", "=", "self", ".", "req_with_ack", "(", "LightSetPower", ",", "{", "\"power_level\"", ":", "myvalue", ",", "\"duration\"", ":", "duration", "}", ",", "callb", "=", "mycallb", ")", "else", ":", "response", "=", "self", ".", "fire_and_forget", "(", "LightSetPower", ",", "{", "\"power_level\"", ":", "myvalue", ",", "\"duration\"", ":", "duration", "}", ",", "num_repeats", "=", "1", ")", "self", ".", "power_level", "=", "myvalue", "if", "callb", ":", "callb", "(", "self", ",", "None", ")" ]
Convenience method to set the power status of the device This method will send a SetPower message to the device, and request callb be executed when an ACK is received. The default callback will simply cache the value. :param value: The new state :type value: str/bool/int :param duration: The duration, in seconds, of the power state transition. :type duration: int :param callb: Callable to be used when the response is received. If not set, self.resp_set_label will be used. :type callb: callable :param rapid: Whether to ask for ack (False) or not (True). Default False :type rapid: bool :returns: None :rtype: None
[ "Convenience", "method", "to", "set", "the", "power", "status", "of", "the", "device" ]
python
train
tensorflow/probability
tensorflow_probability/python/distributions/linear_gaussian_ssm.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L44-L69
def _check_equal_shape(name, static_shape, dynamic_shape, static_target_shape, dynamic_target_shape=None): """Check that source and target shape match, statically if possible.""" static_target_shape = tf.TensorShape(static_target_shape) if tensorshape_util.is_fully_defined( static_shape) and tensorshape_util.is_fully_defined(static_target_shape): if static_shape != static_target_shape: raise ValueError("{}: required shape {} but found {}". format(name, static_target_shape, static_shape)) return None else: if dynamic_target_shape is None: if tensorshape_util.is_fully_defined(static_target_shape): dynamic_target_shape = tensorshape_util.as_list(static_target_shape) else: raise ValueError("{}: cannot infer target shape: no dynamic shape " "specified and static shape {} is not fully defined". format(name, static_target_shape)) return assert_util.assert_equal( dynamic_shape, dynamic_target_shape, message=("{}: required shape {}".format(name, static_target_shape)))
[ "def", "_check_equal_shape", "(", "name", ",", "static_shape", ",", "dynamic_shape", ",", "static_target_shape", ",", "dynamic_target_shape", "=", "None", ")", ":", "static_target_shape", "=", "tf", ".", "TensorShape", "(", "static_target_shape", ")", "if", "tensorshape_util", ".", "is_fully_defined", "(", "static_shape", ")", "and", "tensorshape_util", ".", "is_fully_defined", "(", "static_target_shape", ")", ":", "if", "static_shape", "!=", "static_target_shape", ":", "raise", "ValueError", "(", "\"{}: required shape {} but found {}\"", ".", "format", "(", "name", ",", "static_target_shape", ",", "static_shape", ")", ")", "return", "None", "else", ":", "if", "dynamic_target_shape", "is", "None", ":", "if", "tensorshape_util", ".", "is_fully_defined", "(", "static_target_shape", ")", ":", "dynamic_target_shape", "=", "tensorshape_util", ".", "as_list", "(", "static_target_shape", ")", "else", ":", "raise", "ValueError", "(", "\"{}: cannot infer target shape: no dynamic shape \"", "\"specified and static shape {} is not fully defined\"", ".", "format", "(", "name", ",", "static_target_shape", ")", ")", "return", "assert_util", ".", "assert_equal", "(", "dynamic_shape", ",", "dynamic_target_shape", ",", "message", "=", "(", "\"{}: required shape {}\"", ".", "format", "(", "name", ",", "static_target_shape", ")", ")", ")" ]
Check that source and target shape match, statically if possible.
[ "Check", "that", "source", "and", "target", "shape", "match", "statically", "if", "possible", "." ]
python
test
OzymandiasTheGreat/python-libinput
libinput/device.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1892-L1908
def has_button(self, button): """Check if this device has a given button. Args: button (int): Button to check for, see ``input.h`` for button definitions. Returns: bool: :obj:`True` if the device has this button, :obj:`False` if it does not. Raises: AssertionError """ rc = self._libinput.libinput_device_pointer_has_button( self._handle, button) assert rc >= 0, 'This device is not a pointer device' return bool(rc)
[ "def", "has_button", "(", "self", ",", "button", ")", ":", "rc", "=", "self", ".", "_libinput", ".", "libinput_device_pointer_has_button", "(", "self", ".", "_handle", ",", "button", ")", "assert", "rc", ">=", "0", ",", "'This device is not a pointer device'", "return", "bool", "(", "rc", ")" ]
Check if this device has a given button. Args: button (int): Button to check for, see ``input.h`` for button definitions. Returns: bool: :obj:`True` if the device has this button, :obj:`False` if it does not. Raises: AssertionError
[ "Check", "if", "this", "device", "has", "a", "given", "button", "." ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L4958-L5001
def browser(i): """ Input: { (template) - use this web template (repo_uoa) - (module_uoa) - (data_uoa) - view a given entry (extra_url) - extra URL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ # Check if ck-web is installed r=find({'module_uoa':'module', 'data_uoa':'wfe'}) if r['return']>0: if r['return']!=16: return r out('Seems like ck-web repository is not installed (can\'t find wfe module)!') out('Please, install it via "ck pull repo:ck-web" and try again!') return {'return':0} t=i.get('template','') ruoa=i.get('repo_uoa','') muoa=i.get('module_uoa','') duoa=i.get('data_uoa','') cid='' if duoa!='' or muoa!='' or ruoa!='': if ruoa!='': cid=ruoa+':' if muoa!='': cid+=muoa+':' if duoa!='': cid+=duoa # Starting web service and asking to open page return access({'action':'start', 'module_uoa':'web', 'browser':'yes', 'template':t, 'cid':cid, 'extra_url':i.get('extra_url','')})
[ "def", "browser", "(", "i", ")", ":", "# Check if ck-web is installed", "r", "=", "find", "(", "{", "'module_uoa'", ":", "'module'", ",", "'data_uoa'", ":", "'wfe'", "}", ")", "if", "r", "[", "'return'", "]", ">", "0", ":", "if", "r", "[", "'return'", "]", "!=", "16", ":", "return", "r", "out", "(", "'Seems like ck-web repository is not installed (can\\'t find wfe module)!'", ")", "out", "(", "'Please, install it via \"ck pull repo:ck-web\" and try again!'", ")", "return", "{", "'return'", ":", "0", "}", "t", "=", "i", ".", "get", "(", "'template'", ",", "''", ")", "ruoa", "=", "i", ".", "get", "(", "'repo_uoa'", ",", "''", ")", "muoa", "=", "i", ".", "get", "(", "'module_uoa'", ",", "''", ")", "duoa", "=", "i", ".", "get", "(", "'data_uoa'", ",", "''", ")", "cid", "=", "''", "if", "duoa", "!=", "''", "or", "muoa", "!=", "''", "or", "ruoa", "!=", "''", ":", "if", "ruoa", "!=", "''", ":", "cid", "=", "ruoa", "+", "':'", "if", "muoa", "!=", "''", ":", "cid", "+=", "muoa", "+", "':'", "if", "duoa", "!=", "''", ":", "cid", "+=", "duoa", "# Starting web service and asking to open page", "return", "access", "(", "{", "'action'", ":", "'start'", ",", "'module_uoa'", ":", "'web'", ",", "'browser'", ":", "'yes'", ",", "'template'", ":", "t", ",", "'cid'", ":", "cid", ",", "'extra_url'", ":", "i", ".", "get", "(", "'extra_url'", ",", "''", ")", "}", ")" ]
Input: { (template) - use this web template (repo_uoa) - (module_uoa) - (data_uoa) - view a given entry (extra_url) - extra URL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
[ "Input", ":", "{", "(", "template", ")", "-", "use", "this", "web", "template", "(", "repo_uoa", ")", "-", "(", "module_uoa", ")", "-", "(", "data_uoa", ")", "-", "view", "a", "given", "entry", "(", "extra_url", ")", "-", "extra", "URL", "}" ]
python
train
ecometrica/grandfatherson
grandfatherson/__init__.py
https://github.com/ecometrica/grandfatherson/blob/b166e4e44887960c3066ebd28eecadfae19561e1/grandfatherson/__init__.py#L178-L192
def to_delete(datetimes, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, firstweekday=SATURDAY, now=None): """ Return a set of datetimes that should be deleted, out of ``datetimes``. See ``to_keep`` for a description of arguments. """ datetimes = set(datetimes) return datetimes - to_keep(datetimes, years=years, months=months, weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds, firstweekday=firstweekday, now=now)
[ "def", "to_delete", "(", "datetimes", ",", "years", "=", "0", ",", "months", "=", "0", ",", "weeks", "=", "0", ",", "days", "=", "0", ",", "hours", "=", "0", ",", "minutes", "=", "0", ",", "seconds", "=", "0", ",", "firstweekday", "=", "SATURDAY", ",", "now", "=", "None", ")", ":", "datetimes", "=", "set", "(", "datetimes", ")", "return", "datetimes", "-", "to_keep", "(", "datetimes", ",", "years", "=", "years", ",", "months", "=", "months", ",", "weeks", "=", "weeks", ",", "days", "=", "days", ",", "hours", "=", "hours", ",", "minutes", "=", "minutes", ",", "seconds", "=", "seconds", ",", "firstweekday", "=", "firstweekday", ",", "now", "=", "now", ")" ]
Return a set of datetimes that should be deleted, out of ``datetimes``. See ``to_keep`` for a description of arguments.
[ "Return", "a", "set", "of", "datetimes", "that", "should", "be", "deleted", "out", "of", "datetimes", "." ]
python
test
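A runnable sketch of a grandfather-father-son retention pass with this API: from 60 daily backups, keep the last 7 dailies plus 4 weeklies and delete the rest.

```python
from datetime import datetime, timedelta
from grandfatherson import SATURDAY, to_delete

now = datetime(2024, 1, 31)
backups = [now - timedelta(days=n) for n in range(60)]
doomed = to_delete(backups, days=7, weeks=4, firstweekday=SATURDAY, now=now)
keep = sorted(set(backups) - doomed)
print(len(doomed), 'to delete;', len(keep), 'to keep')
```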
O365/python-o365
O365/mailbox.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/mailbox.py#L304-L332
def refresh_folder(self, update_parent_if_changed=False): """ Re-download folder data Inbox Folder will be unable to download its own data (no folder_id) :param bool update_parent_if_changed: updates self.parent with new parent Folder if changed :return: Refreshed or Not :rtype: bool """ folder_id = getattr(self, 'folder_id', None) if self.root or folder_id is None: return False folder = self.get_folder(folder_id=folder_id) if folder is None: return False self.name = folder.name if folder.parent_id and self.parent_id: if folder.parent_id != self.parent_id: self.parent_id = folder.parent_id self.parent = (self.get_parent_folder() if update_parent_if_changed else None) self.child_folders_count = folder.child_folders_count self.unread_items_count = folder.unread_items_count self.total_items_count = folder.total_items_count self.updated_at = folder.updated_at return True
[ "def", "refresh_folder", "(", "self", ",", "update_parent_if_changed", "=", "False", ")", ":", "folder_id", "=", "getattr", "(", "self", ",", "'folder_id'", ",", "None", ")", "if", "self", ".", "root", "or", "folder_id", "is", "None", ":", "return", "False", "folder", "=", "self", ".", "get_folder", "(", "folder_id", "=", "folder_id", ")", "if", "folder", "is", "None", ":", "return", "False", "self", ".", "name", "=", "folder", ".", "name", "if", "folder", ".", "parent_id", "and", "self", ".", "parent_id", ":", "if", "folder", ".", "parent_id", "!=", "self", ".", "parent_id", ":", "self", ".", "parent_id", "=", "folder", ".", "parent_id", "self", ".", "parent", "=", "(", "self", ".", "get_parent_folder", "(", ")", "if", "update_parent_if_changed", "else", "None", ")", "self", ".", "child_folders_count", "=", "folder", ".", "child_folders_count", "self", ".", "unread_items_count", "=", "folder", ".", "unread_items_count", "self", ".", "total_items_count", "=", "folder", ".", "total_items_count", "self", ".", "updated_at", "=", "folder", ".", "updated_at", "return", "True" ]
Re-download folder data Inbox Folder will be unable to download its own data (no folder_id) :param bool update_parent_if_changed: updates self.parent with new parent Folder if changed :return: Refreshed or Not :rtype: bool
[ "Re", "-", "download", "folder", "data", "Inbox", "Folder", "will", "be", "unable", "to", "download", "its", "own", "data", "(", "no", "folder_id", ")" ]
python
train
storax/upme
src/upme/main.py
https://github.com/storax/upme/blob/41c2d91f922691e31ff940f33b755d2cb64dfef8/src/upme/main.py#L84-L92
def restart(): """Restart the application the same way it was started :returns: None :rtype: None :raises: SystemExit """ python = sys.executable os.execl(python, python, * sys.argv)
[ "def", "restart", "(", ")", ":", "python", "=", "sys", ".", "executable", "os", ".", "execl", "(", "python", ",", "python", ",", "*", "sys", ".", "argv", ")" ]
Restart the application the same way it was started :returns: None :rtype: None :raises: SystemExit
[ "Restart", "the", "application", "the", "same", "way", "it", "was", "started" ]
python
train
jupyterhub/kubespawner
kubespawner/objects.py
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/objects.py#L393-L433
def make_pvc( name, storage_class, access_modes, storage, labels=None, annotations=None, ): """ Make a k8s pvc specification for running a user notebook. Parameters ---------- name: Name of persistent volume claim. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label. storage_class: String of the name of the k8s Storage Class to use. access_modes: A list specifying what access modes the pod should have towards the pvc storage: The amount of storage needed for the pvc """ pvc = V1PersistentVolumeClaim() pvc.kind = "PersistentVolumeClaim" pvc.api_version = "v1" pvc.metadata = V1ObjectMeta() pvc.metadata.name = name pvc.metadata.annotations = (annotations or {}).copy() pvc.metadata.labels = (labels or {}).copy() pvc.spec = V1PersistentVolumeClaimSpec() pvc.spec.access_modes = access_modes pvc.spec.resources = V1ResourceRequirements() pvc.spec.resources.requests = {"storage": storage} if storage_class: pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class}) pvc.spec.storage_class_name = storage_class return pvc
[ "def", "make_pvc", "(", "name", ",", "storage_class", ",", "access_modes", ",", "storage", ",", "labels", "=", "None", ",", "annotations", "=", "None", ",", ")", ":", "pvc", "=", "V1PersistentVolumeClaim", "(", ")", "pvc", ".", "kind", "=", "\"PersistentVolumeClaim\"", "pvc", ".", "api_version", "=", "\"v1\"", "pvc", ".", "metadata", "=", "V1ObjectMeta", "(", ")", "pvc", ".", "metadata", ".", "name", "=", "name", "pvc", ".", "metadata", ".", "annotations", "=", "(", "annotations", "or", "{", "}", ")", ".", "copy", "(", ")", "pvc", ".", "metadata", ".", "labels", "=", "(", "labels", "or", "{", "}", ")", ".", "copy", "(", ")", "pvc", ".", "spec", "=", "V1PersistentVolumeClaimSpec", "(", ")", "pvc", ".", "spec", ".", "access_modes", "=", "access_modes", "pvc", ".", "spec", ".", "resources", "=", "V1ResourceRequirements", "(", ")", "pvc", ".", "spec", ".", "resources", ".", "requests", "=", "{", "\"storage\"", ":", "storage", "}", "if", "storage_class", ":", "pvc", ".", "metadata", ".", "annotations", ".", "update", "(", "{", "\"volume.beta.kubernetes.io/storage-class\"", ":", "storage_class", "}", ")", "pvc", ".", "spec", ".", "storage_class_name", "=", "storage_class", "return", "pvc" ]
Make a k8s pvc specification for running a user notebook. Parameters ---------- name: Name of persistent volume claim. Must be unique within the namespace the object is going to be created in. Must be a valid DNS label. storage_class: String of the name of the k8s Storage Class to use. access_modes: A list specifying what access modes the pod should have towards the pvc storage: The amount of storage needed for the pvc
[ "Make", "a", "k8s", "pvc", "specification", "for", "running", "a", "user", "notebook", "." ]
python
train
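A hedged usage sketch: submit the spec built by make_pvc with the official kubernetes client, assumed installed and configured; the namespace and sizes are illustrative.

```python
from kubernetes import client, config

config.load_kube_config()  # or load_incluster_config() inside a pod
pvc = make_pvc(
    name='claim-alice',
    storage_class='standard',
    access_modes=['ReadWriteOnce'],
    storage='10Gi',
    labels={'app': 'jupyterhub'},
)
client.CoreV1Api().create_namespaced_persistent_volume_claim(
    namespace='jhub', body=pvc)
```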
scottgigante/tasklogger
tasklogger/logger.py
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/logger.py#L64-L96
def set_level(self, level=1): """Set the logging level Parameters ---------- level : `int` or `bool` (optional, default: 1) If False or 0, prints WARNING and higher messages. If True or 1, prints INFO and higher messages. If 2 or higher, prints all messages. """ if level is True or level == 1: level = logging.INFO level_name = "INFO" elif level is False or level <= 0: level = logging.WARNING level_name = "WARNING" elif level >= 2: level = logging.DEBUG level_name = "DEBUG" if not self.logger.handlers: self.logger.tasklogger = self self.logger.propagate = False handler = logging.StreamHandler( stream=stream.RSafeStream(stream=self.stream)) handler.setFormatter(logging.Formatter(fmt='%(message)s')) self.logger.addHandler(handler) if level != self.logger.level: self.level = level self.logger.setLevel(level) self.debug("Set {} logging to {}".format( self.name, level_name))
[ "def", "set_level", "(", "self", ",", "level", "=", "1", ")", ":", "if", "level", "is", "True", "or", "level", "==", "1", ":", "level", "=", "logging", ".", "INFO", "level_name", "=", "\"INFO\"", "elif", "level", "is", "False", "or", "level", "<=", "0", ":", "level", "=", "logging", ".", "WARNING", "level_name", "=", "\"WARNING\"", "elif", "level", ">=", "2", ":", "level", "=", "logging", ".", "DEBUG", "level_name", "=", "\"DEBUG\"", "if", "not", "self", ".", "logger", ".", "handlers", ":", "self", ".", "logger", ".", "tasklogger", "=", "self", "self", ".", "logger", ".", "propagate", "=", "False", "handler", "=", "logging", ".", "StreamHandler", "(", "stream", "=", "stream", ".", "RSafeStream", "(", "stream", "=", "self", ".", "stream", ")", ")", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "fmt", "=", "'%(message)s'", ")", ")", "self", ".", "logger", ".", "addHandler", "(", "handler", ")", "if", "level", "!=", "self", ".", "logger", ".", "level", ":", "self", ".", "level", "=", "level", "self", ".", "logger", ".", "setLevel", "(", "level", ")", "self", ".", "debug", "(", "\"Set {} logging to {}\"", ".", "format", "(", "self", ".", "name", ",", "level_name", ")", ")" ]
Set the logging level Parameters ---------- level : `int` or `bool` (optional, default: 1) If False or 0, prints WARNING and higher messages. If True or 1, prints INFO and higher messages. If 2 or higher, prints all messages.
[ "Set", "the", "logging", "level" ]
python
train
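A usage sketch, assuming the package exposes the TaskLogger class defined here: level 2 enables DEBUG output, and the logger announces the change through its own debug() call shown above.

```python
import tasklogger  # assumption: TaskLogger is importable from the package root

logger = tasklogger.TaskLogger()
logger.set_level(2)  # DEBUG; logs "Set <name> logging to DEBUG"
logger.set_level(0)  # back to WARNING and above only
```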
acrazing/dbapi
dbapi/Group.py
https://github.com/acrazing/dbapi/blob/8c1f85cb1a051daf7be1fc97a62c4499983e9898/dbapi/Group.py#L275-L285
def list_liked_topics(self, user_alias=None, start=0): """ Topics the user has liked :param user_alias: target user, defaults to the current user :param start: pagination offset :return: list with a next-page marker """ user_alias = user_alias or self.api.user_alias xml = self.api.xml(API_GROUP_LIST_USER_LIKED_TOPICS % user_alias, params={'start': start}) return build_list_result(self._parse_topic_table(xml, 'title,comment,time,group'), xml)
[ "def", "list_liked_topics", "(", "self", ",", "user_alias", "=", "None", ",", "start", "=", "0", ")", ":", "user_alias", "=", "user_alias", "or", "self", ".", "api", ".", "user_alias", "xml", "=", "self", ".", "api", ".", "xml", "(", "API_GROUP_LIST_USER_LIKED_TOPICS", "%", "user_alias", ",", "params", "=", "{", "'start'", ":", "start", "}", ")", "return", "build_list_result", "(", "self", ".", "_parse_topic_table", "(", "xml", ",", "'title,comment,time,group'", ")", ",", "xml", ")" ]
Topics the user has liked :param user_alias: target user, defaults to the current user :param start: pagination offset :return: list with a next-page marker
[ "喜欢过的话题", ":", "param", "user_alias", ":", "指定用户,默认当前", ":", "param", "start", ":", "翻页", ":", "return", ":", "带下一页的列表" ]
python
train
EconForge/dolo
dolo/compiler/function_compiler_sympy.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/compiler/function_compiler_sympy.py#L63-L190
def compile_higher_order_function(eqs, syms, params, order=2, funname='anonymous', return_code=False, compile=False): '''From a list of equations and variables, define a multivariate functions with higher order derivatives.''' from dolang.symbolic import stringify, stringify_symbol vars = [s[0] for s in syms] # TEMP: compatibility fix when eqs is an Odict: eqs = [eq for eq in eqs] if isinstance(eqs[0], str): # elif not isinstance(eqs[0], sympy.Basic): # assume we have ASTs eqs = list([ast.parse(eq).body[0] for eq in eqs]) eqs_std = list( [stringify_symbol(eq, variables=vars) for eq in eqs] ) eqs_sym = list( [ast_to_sympy(eq) for eq in eqs_std] ) else: eqs_sym = eqs symsd = list( [stringify_symbol((a,b)) for a,b in syms] ) paramsd = list( [stringify_symbol(a) for a in params] ) D = higher_order_diff(eqs_sym, symsd, order=order) txt = """def {funname}(x, p, order=1): import numpy from numpy import log, exp, tan, sqrt from numpy import pi as pi_ from numpy import inf as inf_ from scipy.special import erfc """.format(funname=funname) for i in range(len(syms)): txt += " {} = x[{}]\n".format(symsd[i], i) txt += "\n" for i in range(len(params)): txt += " {} = p[{}]\n".format(paramsd[i], i) txt += "\n out = numpy.zeros({})".format(len(eqs)) for i in range(len(eqs)): txt += "\n out[{}] = {}".format(i, D[0][i]) txt += """ if order == 0: return out """ if order >= 1: # Jacobian txt += " out_1 = numpy.zeros(({},{}))\n".format(len(eqs), len(syms)) for i in range(len(eqs)): for j in range(len(syms)): val = D[1][i,j] if val != 0: txt += " out_1[{},{}] = {}\n".format(i,j,D[1][i,j]) txt += """ if order == 1: return [out, out_1] """ if order >= 2: # Hessian txt += " out_2 = numpy.zeros(({},{},{}))\n".format(len(eqs), len(syms), len(syms)) for n in range(len(eqs)): for i in range(len(syms)): for j in range(len(syms)): val = D[2][n,i,j] if val is not None: if val != 0: txt += " out_2[{},{},{}] = {}\n".format(n,i,j,D[2][n,i,j]) else: i1, j1 = sorted( (i,j) ) if D[2][n,i1,j1] != 0: txt += " out_2[{},{},{}] = out_2[{},{},{}]\n".format(n,i,j,n,i1,j1) txt += """ if order == 2: return [out, out_1, out_2] """ if order >= 3: # Hessian txt += " out_3 = numpy.zeros(({},{},{},{}))\n".format(len(eqs), len(syms), len(syms), len(syms)) for n in range(len(eqs)): for i in range(len(syms)): for j in range(len(syms)): for k in range(len(syms)): val = D[3][n,i,j,k] if val is not None: if val != 0: txt += " out_3[{},{},{},{}] = {}\n".format(n,i,j,k,D[3][n,i,j,k]) else: i1, j1, k1 = sorted( (i,j,k) ) if D[3][n,i1,j1,k1] != 0: txt += " out_3[{},{},{},{}] = out_3[{},{},{},{}]\n".format(n,i,j,k,n,i1,j1,k1) txt += """ if order == 3: return [out, out_1, out_2, out_3] """ if return_code: return txt else: d = {} exec(txt, d) fun = d[funname] if compile: raise Exception("Not implemented.") return fun
[ "def", "compile_higher_order_function", "(", "eqs", ",", "syms", ",", "params", ",", "order", "=", "2", ",", "funname", "=", "'anonymous'", ",", "return_code", "=", "False", ",", "compile", "=", "False", ")", ":", "from", "dolang", ".", "symbolic", "import", "stringify", ",", "stringify_symbol", "vars", "=", "[", "s", "[", "0", "]", "for", "s", "in", "syms", "]", "# TEMP: compatibility fix when eqs is an Odict:", "eqs", "=", "[", "eq", "for", "eq", "in", "eqs", "]", "if", "isinstance", "(", "eqs", "[", "0", "]", ",", "str", ")", ":", "# elif not isinstance(eqs[0], sympy.Basic):", "# assume we have ASTs", "eqs", "=", "list", "(", "[", "ast", ".", "parse", "(", "eq", ")", ".", "body", "[", "0", "]", "for", "eq", "in", "eqs", "]", ")", "eqs_std", "=", "list", "(", "[", "stringify_symbol", "(", "eq", ",", "variables", "=", "vars", ")", "for", "eq", "in", "eqs", "]", ")", "eqs_sym", "=", "list", "(", "[", "ast_to_sympy", "(", "eq", ")", "for", "eq", "in", "eqs_std", "]", ")", "else", ":", "eqs_sym", "=", "eqs", "symsd", "=", "list", "(", "[", "stringify_symbol", "(", "(", "a", ",", "b", ")", ")", "for", "a", ",", "b", "in", "syms", "]", ")", "paramsd", "=", "list", "(", "[", "stringify_symbol", "(", "a", ")", "for", "a", "in", "params", "]", ")", "D", "=", "higher_order_diff", "(", "eqs_sym", ",", "symsd", ",", "order", "=", "order", ")", "txt", "=", "\"\"\"def {funname}(x, p, order=1):\n\n import numpy\n from numpy import log, exp, tan, sqrt\n from numpy import pi as pi_\n from numpy import inf as inf_\n from scipy.special import erfc\n\n\"\"\"", ".", "format", "(", "funname", "=", "funname", ")", "for", "i", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "txt", "+=", "\" {} = x[{}]\\n\"", ".", "format", "(", "symsd", "[", "i", "]", ",", "i", ")", "txt", "+=", "\"\\n\"", "for", "i", "in", "range", "(", "len", "(", "params", ")", ")", ":", "txt", "+=", "\" {} = p[{}]\\n\"", ".", "format", "(", "paramsd", "[", "i", "]", ",", "i", ")", "txt", "+=", "\"\\n out = numpy.zeros({})\"", ".", "format", "(", "len", "(", "eqs", ")", ")", "for", "i", "in", "range", "(", "len", "(", "eqs", ")", ")", ":", "txt", "+=", "\"\\n out[{}] = {}\"", ".", "format", "(", "i", ",", "D", "[", "0", "]", "[", "i", "]", ")", "txt", "+=", "\"\"\"\n\n if order == 0:\n return out\n\n\"\"\"", "if", "order", ">=", "1", ":", "# Jacobian", "txt", "+=", "\" out_1 = numpy.zeros(({},{}))\\n\"", ".", "format", "(", "len", "(", "eqs", ")", ",", "len", "(", "syms", ")", ")", "for", "i", "in", "range", "(", "len", "(", "eqs", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "val", "=", "D", "[", "1", "]", "[", "i", ",", "j", "]", "if", "val", "!=", "0", ":", "txt", "+=", "\" out_1[{},{}] = {}\\n\"", ".", "format", "(", "i", ",", "j", ",", "D", "[", "1", "]", "[", "i", ",", "j", "]", ")", "txt", "+=", "\"\"\"\n\n if order == 1:\n return [out, out_1]\n\n\"\"\"", "if", "order", ">=", "2", ":", "# Hessian", "txt", "+=", "\" out_2 = numpy.zeros(({},{},{}))\\n\"", ".", "format", "(", "len", "(", "eqs", ")", ",", "len", "(", "syms", ")", ",", "len", "(", "syms", ")", ")", "for", "n", "in", "range", "(", "len", "(", "eqs", ")", ")", ":", "for", "i", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "val", "=", "D", "[", "2", "]", "[", "n", ",", "i", ",", "j", "]", "if", "val", "is", "not", "None", ":", "if", "val", "!=", "0", ":", "txt", "+=", "\" out_2[{},{},{}] = {}\\n\"", ".", "format", "(", "n", ",", "i", ",", "j", ",", "D", 
"[", "2", "]", "[", "n", ",", "i", ",", "j", "]", ")", "else", ":", "i1", ",", "j1", "=", "sorted", "(", "(", "i", ",", "j", ")", ")", "if", "D", "[", "2", "]", "[", "n", ",", "i1", ",", "j1", "]", "!=", "0", ":", "txt", "+=", "\" out_2[{},{},{}] = out_2[{},{},{}]\\n\"", ".", "format", "(", "n", ",", "i", ",", "j", ",", "n", ",", "i1", ",", "j1", ")", "txt", "+=", "\"\"\"\n\n if order == 2:\n return [out, out_1, out_2]\n\n\"\"\"", "if", "order", ">=", "3", ":", "# Hessian", "txt", "+=", "\" out_3 = numpy.zeros(({},{},{},{}))\\n\"", ".", "format", "(", "len", "(", "eqs", ")", ",", "len", "(", "syms", ")", ",", "len", "(", "syms", ")", ",", "len", "(", "syms", ")", ")", "for", "n", "in", "range", "(", "len", "(", "eqs", ")", ")", ":", "for", "i", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "for", "k", "in", "range", "(", "len", "(", "syms", ")", ")", ":", "val", "=", "D", "[", "3", "]", "[", "n", ",", "i", ",", "j", ",", "k", "]", "if", "val", "is", "not", "None", ":", "if", "val", "!=", "0", ":", "txt", "+=", "\" out_3[{},{},{},{}] = {}\\n\"", ".", "format", "(", "n", ",", "i", ",", "j", ",", "k", ",", "D", "[", "3", "]", "[", "n", ",", "i", ",", "j", ",", "k", "]", ")", "else", ":", "i1", ",", "j1", ",", "k1", "=", "sorted", "(", "(", "i", ",", "j", ",", "k", ")", ")", "if", "D", "[", "3", "]", "[", "n", ",", "i1", ",", "j1", ",", "k1", "]", "!=", "0", ":", "txt", "+=", "\" out_3[{},{},{},{}] = out_3[{},{},{},{}]\\n\"", ".", "format", "(", "n", ",", "i", ",", "j", ",", "k", ",", "n", ",", "i1", ",", "j1", ",", "k1", ")", "txt", "+=", "\"\"\"\n\n if order == 3:\n return [out, out_1, out_2, out_3]\n \"\"\"", "if", "return_code", ":", "return", "txt", "else", ":", "d", "=", "{", "}", "exec", "(", "txt", ",", "d", ")", "fun", "=", "d", "[", "funname", "]", "if", "compile", ":", "raise", "Exception", "(", "\"Not implemented.\"", ")", "return", "fun" ]
From a list of equations and variables, define a multivariate function with higher order derivatives.
[ "From", "a", "list", "of", "equations", "and", "variables", "define", "a", "multivariate", "functions", "with", "higher", "order", "derivatives", "." ]
python
train
johnbywater/eventsourcing
eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L334-L346
def register_new_edge(edge_id, first_char_index, last_char_index, source_node_id, dest_node_id): """Factory method, registers new edge. """ event = Edge.Created( originator_id=edge_id, first_char_index=first_char_index, last_char_index=last_char_index, source_node_id=source_node_id, dest_node_id=dest_node_id, ) entity = Edge.mutate(event=event) publish(event) return entity
[ "def", "register_new_edge", "(", "edge_id", ",", "first_char_index", ",", "last_char_index", ",", "source_node_id", ",", "dest_node_id", ")", ":", "event", "=", "Edge", ".", "Created", "(", "originator_id", "=", "edge_id", ",", "first_char_index", "=", "first_char_index", ",", "last_char_index", "=", "last_char_index", ",", "source_node_id", "=", "source_node_id", ",", "dest_node_id", "=", "dest_node_id", ",", ")", "entity", "=", "Edge", ".", "mutate", "(", "event", "=", "event", ")", "publish", "(", "event", ")", "return", "entity" ]
Factory method, registers new edge.
[ "Factory", "method", "registers", "new", "edge", "." ]
python
train
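A hedged usage sketch of the factory: five ids/indices produce a new Edge entity and publish its Created event; the UUIDs are illustrative and the attribute name is assumed to mirror the event's constructor arguments.

```python
from uuid import uuid4

edge = register_new_edge(
    edge_id=uuid4(),
    first_char_index=0,
    last_char_index=3,
    source_node_id=uuid4(),
    dest_node_id=uuid4(),
)
assert edge.first_char_index == 0  # attribute assumed mirrored from the event
```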