Column schema for the rows below (name: type, observed range):

repo: string, length 7–55
path: string, length 4–223
url: string, length 87–315
code: string, length 75–104k
code_tokens: list
docstring: string, length 1–46.9k
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
avg_line_len: float64, 7.91–980
wal-e/wal-e
wal_e/blobstore/file/calling_format.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/blobstore/file/calling_format.py#L6-L12
def remove_empty_dirs(path):
    """ removes empty dirs under a given path """
    for root, dirs, files in os.walk(path):
        for d in dirs:
            dir_path = os.path.join(root, d)
            if not os.listdir(dir_path):
                os.rmdir(dir_path)
[ "def", "remove_empty_dirs", "(", "path", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "d", "in", "dirs", ":", "dir_path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "d", ")", "if", "not", "os", ".", "listdir", "(", "dir_path", ")", ":", "os", ".", "rmdir", "(", "dir_path", ")" ]
removes empty dirs under a given path
[ "removes", "empty", "dirs", "under", "a", "given", "path" ]
python
train
37.142857
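A minimal usage sketch for `remove_empty_dirs` above (the scratch-directory setup is illustrative, not part of the dataset row). Note that with top-down os.walk a chain of nested empty directories needs repeated passes to be fully pruned, since a parent is checked before its children are removed.

import os
import tempfile

# Build a scratch tree with one empty and one non-empty directory.
base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, "keep"))
open(os.path.join(base, "keep", "data.txt"), "w").close()
os.makedirs(os.path.join(base, "empty"))

remove_empty_dirs(base)

assert not os.path.isdir(os.path.join(base, "empty"))  # empty dir removed
assert os.path.isdir(os.path.join(base, "keep"))       # non-empty dir kept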
pyQode/pyqode.python
pyqode/python/modes/autoindent.py
https://github.com/pyQode/pyqode.python/blob/821e000ea2e2638a82ce095a559e69afd9bd4f38/pyqode/python/modes/autoindent.py#L301-L309
def _at_block_start(tc, line):
    """
    Improve QTextCursor.atBlockStart to ignore spaces
    """
    if tc.atBlockStart():
        return True
    column = tc.columnNumber()
    indentation = len(line) - len(line.lstrip())
    return column <= indentation
[ "def", "_at_block_start", "(", "tc", ",", "line", ")", ":", "if", "tc", ".", "atBlockStart", "(", ")", ":", "return", "True", "column", "=", "tc", ".", "columnNumber", "(", ")", "indentation", "=", "len", "(", "line", ")", "-", "len", "(", "line", ".", "lstrip", "(", ")", ")", "return", "column", "<=", "indentation" ]
Improve QTextCursor.atBlockStart to ignore spaces
[ "Improve", "QTextCursor", ".", "atBlockStart", "to", "ignore", "spaces" ]
python
valid
31.444444
OpenMath/py-openmath
openmath/helpers.py
https://github.com/OpenMath/py-openmath/blob/4906aa9ccf606f533675c28823772e07c30fd220/openmath/helpers.py#L237-L287
def interpretAsOpenMath(x):
    """tries to convert a Python object into an OpenMath object

    this is not a replacement for using a Converter for exporting Python
    objects; instead, it is used for conveniently building OM objects in a
    DSL embedded in Python; in particular, it converts Python functions
    into OMBinding objects using lambdaOM as the binder"""
    if hasattr(x, "_ishelper") and x._ishelper:
        # wrapped things in this class -> unwrap
        return x._toOM()
    elif isinstance(x, om.OMAny):
        # already OM
        return x
    elif isinstance(x, six.integer_types):
        # integers -> OMI
        return om.OMInteger(x)
    elif isinstance(x, float):
        # floats -> OMF
        return om.OMFloat(x)
    elif isinstance(x, six.string_types):
        # strings -> OMSTR
        return om.OMString(x)
    elif isinstance(x, WrappedHelper):
        # wrapper -> wrapped object
        return x.toOM()
    elif inspect.isfunction(x):
        # function -> OMBIND(lambda,...)
        # get all the parameters of the function
        paramMap = inspect.signature(x).parameters
        params = [v for k, v in six.iteritems(paramMap)]
        # make sure that all of them are positional
        posArgKinds = [inspect.Parameter.POSITIONAL_ONLY,
                       inspect.Parameter.POSITIONAL_OR_KEYWORD]
        if not all([p.kind in posArgKinds for p in params]):
            raise CannotInterpretAsOpenMath("no sequence arguments allowed")
        # call the function with appropriate OMVariables
        paramsOM = [om.OMVariable(name=p.name) for p in params]
        bodyOM = interpretAsOpenMath(x(*paramsOM))
        return OMBinding(om.OMSymbol(name="lambda", cd="python",
                                     cdbase="http://python.org"),
                         paramsOM, bodyOM)
    else:
        # fail
        raise CannotInterpretAsOpenMath("unknown kind of object: " + str(x))
[ "def", "interpretAsOpenMath", "(", "x", ")", ":", "if", "hasattr", "(", "x", ",", "\"_ishelper\"", ")", "and", "x", ".", "_ishelper", ":", "# wrapped things in this class -> unwrap", "return", "x", ".", "_toOM", "(", ")", "elif", "isinstance", "(", "x", ",", "om", ".", "OMAny", ")", ":", "# already OM", "return", "x", "elif", "isinstance", "(", "x", ",", "six", ".", "integer_types", ")", ":", "# integers -> OMI", "return", "om", ".", "OMInteger", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "float", ")", ":", "# floats -> OMF", "return", "om", ".", "OMFloat", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ":", "# strings -> OMSTR", "return", "om", ".", "OMString", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "WrappedHelper", ")", ":", "# wrapper -> wrapped object", "return", "x", ".", "toOM", "(", ")", "elif", "inspect", ".", "isfunction", "(", "x", ")", ":", "# function -> OMBIND(lambda,...)", "# get all the parameters of the function", "paramMap", "=", "inspect", ".", "signature", "(", "x", ")", ".", "parameters", "params", "=", "[", "v", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "paramMap", ")", "]", "# make sure that all of them are positional", "posArgKinds", "=", "[", "inspect", ".", "Parameter", ".", "POSITIONAL_ONLY", ",", "inspect", ".", "Parameter", ".", "POSITIONAL_OR_KEYWORD", "]", "if", "not", "all", "(", "[", "p", ".", "kind", "in", "posArgKinds", "for", "p", "in", "params", "]", ")", ":", "raise", "CannotInterpretAsOpenMath", "(", "\"no sequence arguments allowed\"", ")", "# call the function with appropriate OMVariables", "paramsOM", "=", "[", "om", ".", "OMVariable", "(", "name", "=", "p", ".", "name", ")", "for", "p", "in", "params", "]", "bodyOM", "=", "interpretAsOpenMath", "(", "x", "(", "*", "paramsOM", ")", ")", "return", "OMBinding", "(", "om", ".", "OMSymbol", "(", "name", "=", "\"lambda\"", ",", "cd", "=", "\"python\"", ",", "cdbase", "=", "\"http://python.org\"", ")", ",", "paramsOM", ",", "bodyOM", ")", "else", ":", "# fail", "raise", "CannotInterpretAsOpenMath", "(", "\"unknown kind of object: \"", "+", "str", "(", "x", ")", ")" ]
tries to convert a Python object into an OpenMath object; this is not a replacement for using a Converter for exporting Python objects; instead, it is used for conveniently building OM objects in a DSL embedded in Python; in particular, it converts Python functions into OMBinding objects using lambdaOM as the binder
[ "tries", "to", "convert", "a", "Python", "object", "into", "an", "OpenMath", "object", "this", "is", "not", "a", "replacement", "for", "using", "a", "Converter", "for", "exporting", "Python", "objects", "instead", "it", "is", "used", "conveniently", "building", "OM", "objects", "in", "DSL", "embedded", "in", "Python", "inparticular", "it", "converts", "Python", "functions", "into", "OMBinding", "objects", "using", "lambdaOM", "as", "the", "binder" ]
python
test
36.313725
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10562-L10570
def adjust_all_to_360(dictionary):
    """
    Take a dictionary and check each key/value pair.
    If this key is of type: declination/longitude/azimuth/direction,
    adjust it to be within 0-360 as required by the MagIC data model
    """
    for key in dictionary:
        dictionary[key] = adjust_to_360(dictionary[key], key)
    return dictionary
[ "def", "adjust_all_to_360", "(", "dictionary", ")", ":", "for", "key", "in", "dictionary", ":", "dictionary", "[", "key", "]", "=", "adjust_to_360", "(", "dictionary", "[", "key", "]", ",", "key", ")", "return", "dictionary" ]
Take a dictionary and check each key/value pair. If this key is of type: declination/longitude/azimuth/direction, adjust it to be within 0-360 as required by the MagIC data model
[ "Take", "a", "dictionary", "and", "check", "each", "key", "/", "value", "pair", ".", "If", "this", "key", "is", "of", "type", ":", "declination", "/", "longitude", "/", "azimuth", "/", "direction", "adjust", "it", "to", "be", "within", "0", "-", "360", "as", "required", "by", "the", "MagIC", "data", "model" ]
python
train
38.222222
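The helper above delegates per-key wrapping to `adjust_to_360`, defined elsewhere in pmag.py. A rough stand-in illustrating the intended contract; the key fragments and modulo logic here are assumptions for illustration, not PmagPy's actual implementation:

def adjust_to_360_sketch(value, key):
    # Hypothetical stand-in: wrap only direction-like columns into [0, 360).
    direction_keys = ('dec', 'lon', 'azimuth', 'dir')  # assumed key fragments
    if any(fragment in key for fragment in direction_keys):
        try:
            return float(value) % 360
        except (TypeError, ValueError):
            return value
    return value

row = {'site_dec': -15.0, 'site_lat': 45.0}
adjusted = {k: adjust_to_360_sketch(v, k) for k, v in row.items()}
assert adjusted == {'site_dec': 345.0, 'site_lat': 45.0}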
adaptive-learning/proso-apps
proso_common/views.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_common/views.py#L29-L43
def show_one(request, post_process_fun, object_class, id, template='common_json.html'):
    """
    Return object of the given type with the specified identifier.

    GET parameters:
      user: identifier of the current user
      stats: turn on the enrichment of the objects by some statistics
      html: turn on the HTML version of the API
    """
    obj = get_object_or_404(object_class, pk=id)
    json = post_process_fun(request, obj)
    return render_json(request, json, template=template, help_text=show_one.__doc__)
[ "def", "show_one", "(", "request", ",", "post_process_fun", ",", "object_class", ",", "id", ",", "template", "=", "'common_json.html'", ")", ":", "obj", "=", "get_object_or_404", "(", "object_class", ",", "pk", "=", "id", ")", "json", "=", "post_process_fun", "(", "request", ",", "obj", ")", "return", "render_json", "(", "request", ",", "json", ",", "template", "=", "template", ",", "help_text", "=", "show_one", ".", "__doc__", ")" ]
Return object of the given type with the specified identifier. GET parameters: user: identifier of the current user; stats: turn on the enrichment of the objects by some statistics; html: turn on the HTML version of the API
[ "Return", "object", "of", "the", "given", "type", "with", "the", "specified", "identifier", "." ]
python
train
35.8
ZELLMECHANIK-DRESDEN/dclab
dclab/rtdc_dataset/core.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/rtdc_dataset/core.py#L206-L213
def features(self):
    """All available features"""
    mycols = []
    for col in dfn.feature_names:
        if col in self:
            mycols.append(col)
    mycols.sort()
    return mycols
[ "def", "features", "(", "self", ")", ":", "mycols", "=", "[", "]", "for", "col", "in", "dfn", ".", "feature_names", ":", "if", "col", "in", "self", ":", "mycols", ".", "append", "(", "col", ")", "mycols", ".", "sort", "(", ")", "return", "mycols" ]
All available features
[ "All", "available", "features" ]
python
train
26.75
bureaucratic-labs/models
b_labs_models/segmentation.py
https://github.com/bureaucratic-labs/models/blob/e674c622af883abd094293fb20c325742080577a/b_labs_models/segmentation.py#L76-L98
def text2labels(text, sents):
    '''
    Marks all characters in the given `text` that don't occur within any
    element of `sents` with the `1` character; other characters (within
    sentences) are marked with `0`. Used in the training process.

    >>> text = 'привет. меня зовут аня.'
    >>> sents = ['привет.', 'меня зовут аня.']
    >>> labels = text2labels(text, sents)
    >>> ' '.join(text)
    'п р и в е т . м е н я з о в у т а н я .'
    >>> ' '.join(labels)
    '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
    '''
    labels = [c for c in text]
    for sent in sents:
        start = text.index(sent)
        finish = start + len(sent)
        labels[start:finish] = '0' * len(sent)
    for i, c in enumerate(labels):
        if c != '0':
            labels[i] = '1'
    return labels
[ "def", "text2labels", "(", "text", ",", "sents", ")", ":", "labels", "=", "[", "c", "for", "c", "in", "text", "]", "for", "sent", "in", "sents", ":", "start", "=", "text", ".", "index", "(", "sent", ")", "finish", "=", "start", "+", "len", "(", "sent", ")", "labels", "[", "start", ":", "finish", "]", "=", "'0'", "*", "len", "(", "sent", ")", "for", "i", ",", "c", "in", "enumerate", "(", "labels", ")", ":", "if", "c", "!=", "'0'", ":", "labels", "[", "i", "]", "=", "'1'", "return", "labels" ]
Marks all characters in the given `text` that don't occur within any element of `sents` with the `1` character; other characters (within sentences) are marked with `0`. Used in the training process. >>> text = 'привет. меня зовут аня.' >>> sents = ['привет.', 'меня зовут аня.'] >>> labels = text2labels(text, sents) >>> ' '.join(text) 'п р и в е т . м е н я з о в у т а н я .' >>> ' '.join(labels) '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
[ "Marks", "all", "characters", "in", "given", "text", "that", "doesn", "t", "exists", "within", "any", "element", "of", "sents", "with", "1", "character", "other", "characters", "(", "within", "sentences", ")", "will", "be", "marked", "with", "0", "Used", "in", "training", "process", ">>>", "text", "=", "привет", ".", "меня", "зовут", "аня", ".", ">>>", "sents", "=", "[", "привет", ".", "меня", "зовут", "аня", ".", "]", ">>>", "labels", "=", "text2labels", "(", "text", "sents", ")", ">>>", ".", "join", "(", "text", ")", ">>>", "п", "р", "и", "в", "е", "т", ".", "м", "е", "н", "я", "з", "о", "в", "у", "т", "а", "н", "я", ".", ">>>", ".", "join", "(", "labels", ")", ">>>", "0", "0", "0", "0", "0", "0", "0", "1", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0" ]
python
train
34.478261
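Running the doctest above end to end, assuming `text2labels` from this row is in scope. One subtlety: characters outside any sentence are relabelled '1' only if they are not already the literal character '0', so a stray '0' digit between sentences would be mislabelled as in-sentence.

text = 'привет. меня зовут аня.'
sents = ['привет.', 'меня зовут аня.']
labels = text2labels(text, sents)
# 7 chars of the first sentence, the separating space, then 15 chars
# of the second sentence: '0' * 7 + '1' + '0' * 15.
assert ''.join(labels) == '00000001' + '0' * 15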
rupertford/melody
src/melody/search.py
https://github.com/rupertford/melody/blob/d50459880a87fdd1802c6893f6e12b52d51b3b91/src/melody/search.py#L121-L140
def _recurse(self, inputs, output):
    '''internal recursion routine called by the run method that generates
    all input combinations'''
    if inputs:
        my_input = inputs[0]
        name = my_input.name
        if my_input.state:
            my_options = my_input.options(self.state)
        else:
            my_options = my_input.options
        for option in my_options:
            my_output = list(output)
            my_output.append({name: option})
            self._recurse(inputs[1:], my_output)
    else:
        try:
            valid, result = self._function(output)
        except ValueError:
            raise RuntimeError("function must return 2 values")
        # NOTE: Python 2 print statement; under Python 3 this line is a
        # syntax error.
        print output, valid, result
[ "def", "_recurse", "(", "self", ",", "inputs", ",", "output", ")", ":", "if", "inputs", ":", "my_input", "=", "inputs", "[", "0", "]", "name", "=", "my_input", ".", "name", "if", "my_input", ".", "state", ":", "my_options", "=", "my_input", ".", "options", "(", "self", ".", "state", ")", "else", ":", "my_options", "=", "my_input", ".", "options", "for", "option", "in", "my_options", ":", "my_output", "=", "list", "(", "output", ")", "my_output", ".", "append", "(", "{", "name", ":", "option", "}", ")", "self", ".", "_recurse", "(", "inputs", "[", "1", ":", "]", ",", "my_output", ")", "else", ":", "try", ":", "valid", ",", "result", "=", "self", ".", "_function", "(", "output", ")", "except", "ValueError", ":", "raise", "RuntimeError", "(", "\"function must return 2 values\"", ")", "print", "output", ",", "valid", ",", "result" ]
internal recursion routine called by the run method that generates all input combinations
[ "internal", "recursion", "routine", "called", "by", "the", "run", "method", "that", "generates", "all", "input", "combinations" ]
python
test
38.6
spyder-ide/spyder-kernels
spyder_kernels/console/kernel.py
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L274-L280
def get_source(self, objtxt):
    """Get object source"""
    from spyder_kernels.utils.dochelpers import getsource
    obj, valid = self._eval(objtxt)
    if valid:
        return getsource(obj)
[ "def", "get_source", "(", "self", ",", "objtxt", ")", ":", "from", "spyder_kernels", ".", "utils", ".", "dochelpers", "import", "getsource", "obj", ",", "valid", "=", "self", ".", "_eval", "(", "objtxt", ")", "if", "valid", ":", "return", "getsource", "(", "obj", ")" ]
Get object source
[ "Get", "object", "source" ]
python
train
30
PlaidWeb/Publ
publ/category.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L127-L131
def description(self):
    """ Get the textual description of the category """
    if self._meta and self._meta.get_payload():
        return utils.TrueCallableProxy(self._description)
    return utils.CallableProxy(None)
[ "def", "description", "(", "self", ")", ":", "if", "self", ".", "_meta", "and", "self", ".", "_meta", ".", "get_payload", "(", ")", ":", "return", "utils", ".", "TrueCallableProxy", "(", "self", ".", "_description", ")", "return", "utils", ".", "CallableProxy", "(", "None", ")" ]
Get the textual description of the category
[ "Get", "the", "textual", "description", "of", "the", "category" ]
python
train
46.6
dswah/pyGAM
pygam/links.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/links.py#L136-L149
def gradient(self, mu, dist):
    """
    derivative of the link function wrt mu

    Parameters
    ----------
    mu : array-like of length n
    dist : Distribution instance

    Returns
    -------
    grad : np.array of length n
    """
    return dist.levels/(mu*(dist.levels - mu))
[ "def", "gradient", "(", "self", ",", "mu", ",", "dist", ")", ":", "return", "dist", ".", "levels", "/", "(", "mu", "*", "(", "dist", ".", "levels", "-", "mu", ")", ")" ]
derivative of the link function wrt mu Parameters ---------- mu : array-like of length n dist : Distribution instance Returns ------- grad : np.array of length n
[ "derivative", "of", "the", "link", "function", "wrt", "mu" ]
python
train
22.714286
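The returned expression is the derivative of the logit-style link g(mu) = log(mu / (levels - mu)). A quick numerical check of that identity; the scalar `levels` stands in for `dist.levels`, which is an assumption about pyGAM's Distribution interface:

import numpy as np

levels = 1.0                     # e.g. binomial with a single trial
mu = np.array([0.2, 0.5, 0.8])

link = lambda m: np.log(m / (levels - m))       # logit link
grad_analytic = levels / (mu * (levels - mu))   # expression from the code above

eps = 1e-6
grad_numeric = (link(mu + eps) - link(mu - eps)) / (2 * eps)  # central difference
assert np.allclose(grad_analytic, grad_numeric, rtol=1e-4)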
ndokter/dsmr_parser
dsmr_parser/clients/serial_.py
https://github.com/ndokter/dsmr_parser/blob/c04b0a5add58ce70153eede1a87ca171876b61c7/dsmr_parser/clients/serial_.py#L51-L78
def read(self, queue):
    """
    Read complete DSMR telegrams from the serial interface and parse them
    into CosemObject's and MbusObject's. Instead of being a generator,
    values are pushed to the provided queue for asynchronous processing.

    :rtype: None
    """
    # create Serial StreamReader
    conn = serial_asyncio.open_serial_connection(**self.serial_settings)
    reader, _ = yield from conn

    while True:
        # Read line if available or give control back to loop until new
        # data has arrived.
        data = yield from reader.readline()
        self.telegram_buffer.append(data.decode('ascii'))

        for telegram in self.telegram_buffer.get_all():
            try:
                # Push new parsed telegram onto queue.
                queue.put_nowait(
                    self.telegram_parser.parse(telegram)
                )
            except ParseError as e:
                logger.warning('Failed to parse telegram: %s', e)
[ "def", "read", "(", "self", ",", "queue", ")", ":", "# create Serial StreamReader", "conn", "=", "serial_asyncio", ".", "open_serial_connection", "(", "*", "*", "self", ".", "serial_settings", ")", "reader", ",", "_", "=", "yield", "from", "conn", "while", "True", ":", "# Read line if available or give control back to loop until new", "# data has arrived.", "data", "=", "yield", "from", "reader", ".", "readline", "(", ")", "self", ".", "telegram_buffer", ".", "append", "(", "data", ".", "decode", "(", "'ascii'", ")", ")", "for", "telegram", "in", "self", ".", "telegram_buffer", ".", "get_all", "(", ")", ":", "try", ":", "# Push new parsed telegram onto queue.", "queue", ".", "put_nowait", "(", "self", ".", "telegram_parser", ".", "parse", "(", "telegram", ")", ")", "except", "ParseError", "as", "e", ":", "logger", ".", "warning", "(", "'Failed to parse telegram: %s'", ",", "e", ")" ]
Read complete DSMR telegrams from the serial interface and parse them into CosemObject's and MbusObject's. Instead of being a generator, values are pushed to the provided queue for asynchronous processing. :rtype: None
[ "Read", "complete", "DSMR", "telegram", "s", "from", "the", "serial", "interface", "and", "parse", "it", "into", "CosemObject", "s", "and", "MbusObject", "s", "." ]
python
test
37
unistra/django-rest-framework-fine-permissions
rest_framework_fine_permissions/management/commands/fine_permissions_load.py
https://github.com/unistra/django-rest-framework-fine-permissions/blob/71af5953648ef9f9bdfb64a4c0ed0ea62661fa61/rest_framework_fine_permissions/management/commands/fine_permissions_load.py#L26-L71
def handle(self, *args, **options):
    """ dump fields permissions for a user """

    def get_user(username):
        try:
            return User.objects.get(username=username)
        except ObjectDoesNotExist as e:
            raise CommandError("This user doesn't exist in the database")

    def add_permissions(user_field_permissions, content_type, name):
        p = None
        try:
            p = FieldPermission.objects.get(content_type=content_type, name=name)
        except ObjectDoesNotExist:
            p = FieldPermission(content_type=content_type, name=name)
            p.save()
        finally:
            user_field_permissions.permissions.add(p)

    if len(args) != 1:
        raise CommandError("Specifies a json file created by the fine_permissions_dump command")
    else:
        try:
            with open(args[0], 'r') as json_file:
                myjson = json.load(json_file)

            user = get_user(options.get('user')) if options['user'] else get_user(myjson['username'])
            fields_permissions = myjson['fields_permissions']

            user_field_permissions = UserFieldPermissions(user=user)
            user_field_permissions.save()

            for f in fields_permissions:
                content_type = ContentType.objects.get(app_label=f["app_label"], model=f["model"])
                add_permissions(user_field_permissions, content_type, f['name'])
        except Exception as e:
            raise CommandError(e)
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "def", "get_user", "(", "username", ")", ":", "try", ":", "return", "User", ".", "objects", ".", "get", "(", "username", "=", "username", ")", "except", "ObjectDoesNotExist", "as", "e", ":", "raise", "CommandError", "(", "\"This user doesn't exist in the database\"", ")", "def", "add_permissions", "(", "user_field_permissions", ",", "content_type", ",", "name", ")", ":", "p", "=", "None", "try", ":", "p", "=", "FieldPermission", ".", "objects", ".", "get", "(", "content_type", "=", "content_type", ",", "name", "=", "name", ")", "except", "ObjectDoesNotExist", ":", "p", "=", "FieldPermission", "(", "content_type", "=", "content_type", ",", "name", "=", "name", ")", "p", ".", "save", "(", ")", "finally", ":", "user_field_permissions", ".", "permissions", ".", "add", "(", "p", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "raise", "CommandError", "(", "\"Specifies a json file created by the fine_permissions_dump command\"", ")", "else", ":", "try", ":", "with", "open", "(", "args", "[", "0", "]", ",", "'r'", ")", "as", "json_file", ":", "myjson", "=", "json", ".", "load", "(", "json_file", ")", "user", "=", "get_user", "(", "options", ".", "get", "(", "'user'", ")", ")", "if", "options", "[", "'user'", "]", "else", "get_user", "(", "myjson", "[", "'username'", "]", ")", "fields_permissions", "=", "myjson", "[", "'fields_permissions'", "]", "user_field_permissions", "=", "UserFieldPermissions", "(", "user", "=", "user", ")", "user_field_permissions", ".", "save", "(", ")", "for", "f", "in", "fields_permissions", ":", "content_type", "=", "ContentType", ".", "objects", ".", "get", "(", "app_label", "=", "f", "[", "\"app_label\"", "]", ",", "model", "=", "f", "[", "\"model\"", "]", ")", "add_permissions", "(", "user_field_permissions", ",", "content_type", ",", "f", "[", "'name'", "]", ")", "except", "Exception", "as", "e", ":", "raise", "CommandError", "(", "e", ")" ]
dump fields permissions for a user
[ "dump", "fields", "permissions", "for", "a", "user" ]
python
train
34.826087
davgeo/clear
clear/database.py
https://github.com/davgeo/clear/blob/5ec85d27efd28afddfcd4c3f44df17f0115a77aa/clear/database.py#L711-L715
def PrintAllTables(self):
    """ Prints contents of every table. """
    goodlogging.Log.Info("DB", "Database contents:\n")
    for table in self._tableDict.keys():
        self._PrintDatabaseTable(table)
[ "def", "PrintAllTables", "(", "self", ")", ":", "goodlogging", ".", "Log", ".", "Info", "(", "\"DB\"", ",", "\"Database contents:\\n\"", ")", "for", "table", "in", "self", ".", "_tableDict", ".", "keys", "(", ")", ":", "self", ".", "_PrintDatabaseTable", "(", "table", ")" ]
Prints contents of every table.
[ "Prints", "contents", "of", "every", "table", "." ]
python
train
39.8
saltstack/salt
salt/states/snapper.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/snapper.py#L122-L131
def _get_baseline_from_tag(config, tag):
    '''
    Returns the last created baseline snapshot marked with `tag`
    '''
    last_snapshot = None
    for snapshot in __salt__['snapper.list_snapshots'](config):
        if tag == snapshot['userdata'].get("baseline_tag"):
            if not last_snapshot or last_snapshot['timestamp'] < snapshot['timestamp']:
                last_snapshot = snapshot
    return last_snapshot
[ "def", "_get_baseline_from_tag", "(", "config", ",", "tag", ")", ":", "last_snapshot", "=", "None", "for", "snapshot", "in", "__salt__", "[", "'snapper.list_snapshots'", "]", "(", "config", ")", ":", "if", "tag", "==", "snapshot", "[", "'userdata'", "]", ".", "get", "(", "\"baseline_tag\"", ")", ":", "if", "not", "last_snapshot", "or", "last_snapshot", "[", "'timestamp'", "]", "<", "snapshot", "[", "'timestamp'", "]", ":", "last_snapshot", "=", "snapshot", "return", "last_snapshot" ]
Returns the last created baseline snapshot marked with `tag`
[ "Returns", "the", "last", "created", "baseline", "snapshot", "marked", "with", "tag" ]
python
train
41.5
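Outside of Salt, the same "latest item matching a tag" scan reduces to max() over a filtered generator. A sketch with plain dicts; the sample data is invented:

snapshots = [
    {'timestamp': 100, 'userdata': {'baseline_tag': 'pre-deploy'}},
    {'timestamp': 300, 'userdata': {}},
    {'timestamp': 200, 'userdata': {'baseline_tag': 'pre-deploy'}},
]
last = max(
    (s for s in snapshots if s['userdata'].get('baseline_tag') == 'pre-deploy'),
    key=lambda s: s['timestamp'],
    default=None,
)
assert last['timestamp'] == 200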
wind-python/windpowerlib
windpowerlib/wind_turbine_cluster.py
https://github.com/wind-python/windpowerlib/blob/421b316139743311b7cb68a69f6b53d2665f7e23/windpowerlib/wind_turbine_cluster.py#L114-L193
def assign_power_curve(self, wake_losses_model='power_efficiency_curve',
                       smoothing=False, block_width=0.5,
                       standard_deviation_method='turbulence_intensity',
                       smoothing_order='wind_farm_power_curves',
                       turbulence_intensity=None, **kwargs):
    r"""
    Calculates the power curve of a wind turbine cluster.

    The turbine cluster power curve is calculated by aggregating the wind
    farm power curves of wind farms within the turbine cluster. Depending
    on the parameters the power curves are smoothed (before or after the
    aggregation) and/or a wind farm efficiency is applied before the
    aggregation. After the calculations the power curve is assigned to the
    attribute `power_curve`.

    Parameters
    ----------
    wake_losses_model : string
        Defines the method for taking wake losses within the farm into
        consideration. Options: 'power_efficiency_curve',
        'constant_efficiency' or None. Default: 'power_efficiency_curve'.
    smoothing : boolean
        If True the power curves will be smoothed before or after the
        aggregation of power curves depending on `smoothing_order`.
        Default: False.
    block_width : float
        Width between the wind speeds in the sum of the equation in
        :py:func:`~.power_curves.smooth_power_curve`. Default: 0.5.
    standard_deviation_method : string
        Method for calculating the standard deviation for the Gauss
        distribution. Options: 'turbulence_intensity',
        'Staffell_Pfenninger'. Default: 'turbulence_intensity'.
    smoothing_order : string
        Defines when the smoothing takes place if `smoothing` is True.
        Options: 'turbine_power_curves' (to the single turbine power
        curves), 'wind_farm_power_curves'.
        Default: 'wind_farm_power_curves'.
    turbulence_intensity : float
        Turbulence intensity at hub height of the wind farm or wind
        turbine cluster for power curve smoothing with
        'turbulence_intensity' method. Can be calculated from
        `roughness_length` instead. Default: None.

    Other Parameters
    ----------------
    roughness_length : float, optional.
        Roughness length. If `standard_deviation_method` is
        'turbulence_intensity' and `turbulence_intensity` is not given
        the turbulence intensity is calculated via the roughness length.

    Returns
    -------
    self
    """
    # Assign wind farm power curves to wind farms of wind turbine cluster
    for farm in self.wind_farms:
        # Assign hub heights (needed for power curve and later for
        # hub height of turbine cluster)
        farm.mean_hub_height()
        # Assign wind farm power curve
        farm.assign_power_curve(
            wake_losses_model=wake_losses_model,
            smoothing=smoothing, block_width=block_width,
            standard_deviation_method=standard_deviation_method,
            smoothing_order=smoothing_order,
            turbulence_intensity=turbulence_intensity, **kwargs)
    # Create data frame from power curves of all wind farms
    df = pd.concat([farm.power_curve.set_index(['wind_speed']).rename(
        columns={'value': farm.name}) for farm in self.wind_farms], axis=1)
    # Sum up power curves
    cluster_power_curve = pd.DataFrame(
        df.interpolate(method='index').sum(axis=1))
    cluster_power_curve.columns = ['value']
    # Return wind speed (index) to a column of the data frame
    cluster_power_curve.reset_index('wind_speed', inplace=True)
    self.power_curve = cluster_power_curve
    return self
[ "def", "assign_power_curve", "(", "self", ",", "wake_losses_model", "=", "'power_efficiency_curve'", ",", "smoothing", "=", "False", ",", "block_width", "=", "0.5", ",", "standard_deviation_method", "=", "'turbulence_intensity'", ",", "smoothing_order", "=", "'wind_farm_power_curves'", ",", "turbulence_intensity", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Assign wind farm power curves to wind farms of wind turbine cluster", "for", "farm", "in", "self", ".", "wind_farms", ":", "# Assign hub heights (needed for power curve and later for", "# hub height of turbine cluster)", "farm", ".", "mean_hub_height", "(", ")", "# Assign wind farm power curve", "farm", ".", "assign_power_curve", "(", "wake_losses_model", "=", "wake_losses_model", ",", "smoothing", "=", "smoothing", ",", "block_width", "=", "block_width", ",", "standard_deviation_method", "=", "standard_deviation_method", ",", "smoothing_order", "=", "smoothing_order", ",", "turbulence_intensity", "=", "turbulence_intensity", ",", "*", "*", "kwargs", ")", "# Create data frame from power curves of all wind farms", "df", "=", "pd", ".", "concat", "(", "[", "farm", ".", "power_curve", ".", "set_index", "(", "[", "'wind_speed'", "]", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "farm", ".", "name", "}", ")", "for", "farm", "in", "self", ".", "wind_farms", "]", ",", "axis", "=", "1", ")", "# Sum up power curves", "cluster_power_curve", "=", "pd", ".", "DataFrame", "(", "df", ".", "interpolate", "(", "method", "=", "'index'", ")", ".", "sum", "(", "axis", "=", "1", ")", ")", "cluster_power_curve", ".", "columns", "=", "[", "'value'", "]", "# Return wind speed (index) to a column of the data frame", "cluster_power_curve", ".", "reset_index", "(", "'wind_speed'", ",", "inplace", "=", "True", ")", "self", ".", "power_curve", "=", "cluster_power_curve", "return", "self" ]
r""" Calculates the power curve of a wind turbine cluster. The turbine cluster power curve is calculated by aggregating the wind farm power curves of wind farms within the turbine cluster. Depending on the parameters the power curves are smoothed (before or after the aggregation) and/or a wind farm efficiency is applied before the aggregation. After the calculations the power curve is assigned to the attribute `power_curve`. Parameters ---------- wake_losses_model : string Defines the method for taking wake losses within the farm into consideration. Options: 'power_efficiency_curve', 'constant_efficiency' or None. Default: 'power_efficiency_curve'. smoothing : boolean If True the power curves will be smoothed before or after the aggregation of power curves depending on `smoothing_order`. Default: False. block_width : float Width between the wind speeds in the sum of the equation in :py:func:`~.power_curves.smooth_power_curve`. Default: 0.5. standard_deviation_method : string Method for calculating the standard deviation for the Gauss distribution. Options: 'turbulence_intensity', 'Staffell_Pfenninger'. Default: 'turbulence_intensity'. smoothing_order : string Defines when the smoothing takes place if `smoothing` is True. Options: 'turbine_power_curves' (to the single turbine power curves), 'wind_farm_power_curves'. Default: 'wind_farm_power_curves'. turbulence_intensity : float Turbulence intensity at hub height of the wind farm or wind turbine cluster for power curve smoothing with 'turbulence_intensity' method. Can be calculated from `roughness_length` instead. Default: None. Other Parameters ---------------- roughness_length : float, optional. Roughness length. If `standard_deviation_method` is 'turbulence_intensity' and `turbulence_intensity` is not given the turbulence intensity is calculated via the roughness length. Returns ------- self
[ "r", "Calculates", "the", "power", "curve", "of", "a", "wind", "turbine", "cluster", "." ]
python
train
48.3
pgmpy/pgmpy
pgmpy/readwrite/XMLBIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/XMLBIF.py#L300-L332
def get_states(self):
    """
    Add outcome to variables of XMLBIF

    Return
    ------
    dict: dict of type {variable: outcome tags}

    Examples
    --------
    >>> writer = XMLBIFWriter(model)
    >>> writer.get_states()
    {'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>,
                 <Element OUTCOME at 0x7ffbabfcdf08>],
     'family-out': [<Element OUTCOME at 0x7ffbabfd4108>,
                    <Element OUTCOME at 0x7ffbabfd4148>],
     'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>,
                       <Element OUTCOME at 0x7ffbabfd40c8>],
     'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>,
                   <Element OUTCOME at 0x7ffbabfcdf88>],
     'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>,
                  <Element OUTCOME at 0x7ffbabfd4048>]}
    """
    outcome_tag = {}
    cpds = self.model.get_cpds()
    for cpd in cpds:
        var = cpd.variable
        outcome_tag[var] = []
        if cpd.state_names is None or cpd.state_names.get(var) is None:
            states = range(cpd.get_cardinality([var])[var])
        else:
            states = cpd.state_names[var]
        for state in states:
            state_tag = etree.SubElement(self.variables[var], "OUTCOME")
            state_tag.text = self._make_valid_state_name(state)
            outcome_tag[var].append(state_tag)
    return outcome_tag
[ "def", "get_states", "(", "self", ")", ":", "outcome_tag", "=", "{", "}", "cpds", "=", "self", ".", "model", ".", "get_cpds", "(", ")", "for", "cpd", "in", "cpds", ":", "var", "=", "cpd", ".", "variable", "outcome_tag", "[", "var", "]", "=", "[", "]", "if", "cpd", ".", "state_names", "is", "None", "or", "cpd", ".", "state_names", ".", "get", "(", "var", ")", "is", "None", ":", "states", "=", "range", "(", "cpd", ".", "get_cardinality", "(", "[", "var", "]", ")", "[", "var", "]", ")", "else", ":", "states", "=", "cpd", ".", "state_names", "[", "var", "]", "for", "state", "in", "states", ":", "state_tag", "=", "etree", ".", "SubElement", "(", "self", ".", "variables", "[", "var", "]", ",", "\"OUTCOME\"", ")", "state_tag", ".", "text", "=", "self", ".", "_make_valid_state_name", "(", "state", ")", "outcome_tag", "[", "var", "]", ".", "append", "(", "state_tag", ")", "return", "outcome_tag" ]
Add outcome to variables of XMLBIF

Return
------
dict: dict of type {variable: outcome tags}

Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
 'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
 'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
 'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
 'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
[ "Add", "outcome", "to", "variables", "of", "XMLBIF" ]
python
train
40.969697
alerta/alerta
alerta/commands.py
https://github.com/alerta/alerta/blob/6478d6addc217c96a4a6688fab841035bef134e1/alerta/commands.py#L78-L110
def user(username, password, all):
    """Create admin users (BasicAuth only)."""
    if current_app.config['AUTH_PROVIDER'] != 'basic':
        raise click.UsageError('Not required for {} admin users'.format(current_app.config['AUTH_PROVIDER']))
    if username and username not in current_app.config['ADMIN_USERS']:
        raise click.UsageError('User {} not an admin'.format(username))
    if not username and not all:
        raise click.UsageError('Missing option "--username".')

    def create_user(admin):
        email = admin if '@' in admin else None
        user = User(
            name='Admin user',
            login=admin,
            password=generate_password_hash(password),
            roles=['admin'],
            text='Created by alertad script',
            email=email,
            email_verified=bool(email)
        )
        try:
            db.get_db()  # init db on global app context
            user = user.create()
        except Exception as e:
            click.echo('ERROR: {}'.format(e))
        else:
            click.echo('{} {}'.format(user.id, user.name))

    if all:
        for admin in current_app.config['ADMIN_USERS']:
            create_user(admin)
    else:
        create_user(username)
[ "def", "user", "(", "username", ",", "password", ",", "all", ")", ":", "if", "current_app", ".", "config", "[", "'AUTH_PROVIDER'", "]", "!=", "'basic'", ":", "raise", "click", ".", "UsageError", "(", "'Not required for {} admin users'", ".", "format", "(", "current_app", ".", "config", "[", "'AUTH_PROVIDER'", "]", ")", ")", "if", "username", "and", "username", "not", "in", "current_app", ".", "config", "[", "'ADMIN_USERS'", "]", ":", "raise", "click", ".", "UsageError", "(", "'User {} not an admin'", ".", "format", "(", "username", ")", ")", "if", "not", "username", "and", "not", "all", ":", "raise", "click", ".", "UsageError", "(", "'Missing option \"--username\".'", ")", "def", "create_user", "(", "admin", ")", ":", "email", "=", "admin", "if", "'@'", "in", "admin", "else", "None", "user", "=", "User", "(", "name", "=", "'Admin user'", ",", "login", "=", "admin", ",", "password", "=", "generate_password_hash", "(", "password", ")", ",", "roles", "=", "[", "'admin'", "]", ",", "text", "=", "'Created by alertad script'", ",", "email", "=", "email", ",", "email_verified", "=", "bool", "(", "email", ")", ")", "try", ":", "db", ".", "get_db", "(", ")", "# init db on global app context", "user", "=", "user", ".", "create", "(", ")", "except", "Exception", "as", "e", ":", "click", ".", "echo", "(", "'ERROR: {}'", ".", "format", "(", "e", ")", ")", "else", ":", "click", ".", "echo", "(", "'{} {}'", ".", "format", "(", "user", ".", "id", ",", "user", ".", "name", ")", ")", "if", "all", ":", "for", "admin", "in", "current_app", ".", "config", "[", "'ADMIN_USERS'", "]", ":", "create_user", "(", "admin", ")", "else", ":", "create_user", "(", "username", ")" ]
Create admin users (BasicAuth only).
[ "Create", "admin", "users", "(", "BasicAuth", "only", ")", "." ]
python
train
36.484848
log2timeline/dfdatetime
dfdatetime/fake_time.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/fake_time.py#L37-L53
def _GetNormalizedTimestamp(self):
    """Retrieves the normalized timestamp.

    Returns:
      decimal.Decimal: normalized timestamp, which contains the number of
          seconds since January 1, 1970 00:00:00 and a fraction of second
          used for increased precision, or None if the normalized timestamp
          cannot be determined.
    """
    if self._normalized_timestamp is None:
        if self._number_of_seconds is not None:
            self._normalized_timestamp = (
                decimal.Decimal(self._microseconds) /
                definitions.MICROSECONDS_PER_SECOND)
            self._normalized_timestamp += decimal.Decimal(self._number_of_seconds)

    return self._normalized_timestamp
[ "def", "_GetNormalizedTimestamp", "(", "self", ")", ":", "if", "self", ".", "_normalized_timestamp", "is", "None", ":", "if", "self", ".", "_number_of_seconds", "is", "not", "None", ":", "self", ".", "_normalized_timestamp", "=", "(", "decimal", ".", "Decimal", "(", "self", ".", "_microseconds", ")", "/", "definitions", ".", "MICROSECONDS_PER_SECOND", ")", "self", ".", "_normalized_timestamp", "+=", "decimal", ".", "Decimal", "(", "self", ".", "_number_of_seconds", ")", "return", "self", ".", "_normalized_timestamp" ]
Retrieves the normalized timestamp.

Returns:
  decimal.Decimal: normalized timestamp, which contains the number of seconds since January 1, 1970 00:00:00 and a fraction of second used for increased precision, or None if the normalized timestamp cannot be determined.
[ "Retrieves", "the", "normalized", "timestamp", "." ]
python
train
40.235294
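The Decimal arithmetic matters here: the microsecond fraction is represented exactly rather than in binary floating point. A standalone illustration; the constant mirrors dfdatetime's definitions.MICROSECONDS_PER_SECOND, which is assumed to be 10**6:

import decimal

MICROSECONDS_PER_SECOND = 10 ** 6  # assumed value of the definitions constant

number_of_seconds = 1281643591
microseconds = 546875

normalized = (decimal.Decimal(microseconds) / MICROSECONDS_PER_SECOND
              + decimal.Decimal(number_of_seconds))
assert str(normalized) == '1281643591.546875'  # exact, no float rounding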
dailymuse/oz
oz/aws_cdn/__init__.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L33-L35
def set_cache_buster(redis, path, hash):
    """Sets the cache buster value for a given file path"""
    redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash)
[ "def", "set_cache_buster", "(", "redis", ",", "path", ",", "hash", ")", ":", "redis", ".", "hset", "(", "\"cache-buster:{}:v3\"", ".", "format", "(", "oz", ".", "settings", "[", "\"s3_bucket\"", "]", ")", ",", "path", ",", "hash", ")" ]
Sets the cache buster value for a given file path
[ "Sets", "the", "cache", "buster", "value", "for", "a", "given", "file", "path" ]
python
train
60
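A self-contained sketch of the Redis hash layout this writes, with a dict-backed stand-in replacing the real client; the bucket name and hash value are invented:

class FakeRedis:
    """Dict-backed stand-in for the subset of the Redis API used above."""
    def __init__(self):
        self.hashes = {}
    def hset(self, key, field, value):
        self.hashes.setdefault(key, {})[field] = value

redis = FakeRedis()
redis.hset("cache-buster:my-bucket:v3", "static/app.js", "3f2a9c")
assert redis.hashes["cache-buster:my-bucket:v3"]["static/app.js"] == "3f2a9c"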
inonit/drf-haystack
drf_haystack/serializers.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L340-L360
def get_narrow_url(self, instance):
    """
    Return a link suitable for narrowing on the current item.
    """
    text = instance[0]
    request = self.context["request"]
    query_params = request.GET.copy()

    # Never keep the page query parameter in narrowing urls.
    # It will raise a NotFound exception when trying to paginate a narrowed queryset.
    page_query_param = self.get_paginate_by_param()
    if page_query_param and page_query_param in query_params:
        del query_params[page_query_param]

    selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
    selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
    query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))

    path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
    url = request.build_absolute_uri(path)
    return serializers.Hyperlink(url, "narrow-url")
[ "def", "get_narrow_url", "(", "self", ",", "instance", ")", ":", "text", "=", "instance", "[", "0", "]", "request", "=", "self", ".", "context", "[", "\"request\"", "]", "query_params", "=", "request", ".", "GET", ".", "copy", "(", ")", "# Never keep the page query parameter in narrowing urls.", "# It will raise a NotFound exception when trying to paginate a narrowed queryset.", "page_query_param", "=", "self", ".", "get_paginate_by_param", "(", ")", "if", "page_query_param", "and", "page_query_param", "in", "query_params", ":", "del", "query_params", "[", "page_query_param", "]", "selected_facets", "=", "set", "(", "query_params", ".", "pop", "(", "self", ".", "root", ".", "facet_query_params_text", ",", "[", "]", ")", ")", "selected_facets", ".", "add", "(", "\"%(field)s_exact:%(text)s\"", "%", "{", "\"field\"", ":", "self", ".", "parent_field", ",", "\"text\"", ":", "text", "}", ")", "query_params", ".", "setlist", "(", "self", ".", "root", ".", "facet_query_params_text", ",", "sorted", "(", "selected_facets", ")", ")", "path", "=", "\"%(path)s?%(query)s\"", "%", "{", "\"path\"", ":", "request", ".", "path_info", ",", "\"query\"", ":", "query_params", ".", "urlencode", "(", ")", "}", "url", "=", "request", ".", "build_absolute_uri", "(", "path", ")", "return", "serializers", ".", "Hyperlink", "(", "url", ",", "\"narrow-url\"", ")" ]
Return a link suitable for narrowing on the current item.
[ "Return", "a", "link", "suitable", "for", "narrowing", "on", "the", "current", "item", "." ]
python
train
48.761905
sphinx-gallery/sphinx-gallery
sphinx_gallery/scrapers.py
https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/scrapers.py#L58-L101
def matplotlib_scraper(block, block_vars, gallery_conf, **kwargs):
    """Scrape Matplotlib images.

    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery
    **kwargs : dict
        Additional keyword arguments to pass to
        :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
        The ``format`` kwarg in particular is used to set the file extension
        of the output file (currently only 'png' and 'svg' are supported).

    Returns
    -------
    rst : str
        The ReSTructuredText that will be rendered to HTML containing
        the images. This is often produced by :func:`figure_rst`.
    """
    matplotlib, plt = _import_matplotlib()
    image_path_iterator = block_vars['image_path_iterator']
    image_paths = list()
    for fig_num, image_path in zip(plt.get_fignums(), image_path_iterator):
        if 'format' in kwargs:
            image_path = '%s.%s' % (os.path.splitext(image_path)[0],
                                    kwargs['format'])
        # Set the fig_num figure as the current figure as we can't
        # save a figure that's not the current figure.
        fig = plt.figure(fig_num)
        to_rgba = matplotlib.colors.colorConverter.to_rgba
        for attr in ['facecolor', 'edgecolor']:
            fig_attr = getattr(fig, 'get_' + attr)()
            default_attr = matplotlib.rcParams['figure.' + attr]
            if to_rgba(fig_attr) != to_rgba(default_attr) and \
                    attr not in kwargs:
                kwargs[attr] = fig_attr
        fig.savefig(image_path, **kwargs)
        image_paths.append(image_path)
    plt.close('all')
    return figure_rst(image_paths, gallery_conf['src_dir'])
[ "def", "matplotlib_scraper", "(", "block", ",", "block_vars", ",", "gallery_conf", ",", "*", "*", "kwargs", ")", ":", "matplotlib", ",", "plt", "=", "_import_matplotlib", "(", ")", "image_path_iterator", "=", "block_vars", "[", "'image_path_iterator'", "]", "image_paths", "=", "list", "(", ")", "for", "fig_num", ",", "image_path", "in", "zip", "(", "plt", ".", "get_fignums", "(", ")", ",", "image_path_iterator", ")", ":", "if", "'format'", "in", "kwargs", ":", "image_path", "=", "'%s.%s'", "%", "(", "os", ".", "path", ".", "splitext", "(", "image_path", ")", "[", "0", "]", ",", "kwargs", "[", "'format'", "]", ")", "# Set the fig_num figure as the current figure as we can't", "# save a figure that's not the current figure.", "fig", "=", "plt", ".", "figure", "(", "fig_num", ")", "to_rgba", "=", "matplotlib", ".", "colors", ".", "colorConverter", ".", "to_rgba", "for", "attr", "in", "[", "'facecolor'", ",", "'edgecolor'", "]", ":", "fig_attr", "=", "getattr", "(", "fig", ",", "'get_'", "+", "attr", ")", "(", ")", "default_attr", "=", "matplotlib", ".", "rcParams", "[", "'figure.'", "+", "attr", "]", "if", "to_rgba", "(", "fig_attr", ")", "!=", "to_rgba", "(", "default_attr", ")", "and", "attr", "not", "in", "kwargs", ":", "kwargs", "[", "attr", "]", "=", "fig_attr", "fig", ".", "savefig", "(", "image_path", ",", "*", "*", "kwargs", ")", "image_paths", ".", "append", "(", "image_path", ")", "plt", ".", "close", "(", "'all'", ")", "return", "figure_rst", "(", "image_paths", ",", "gallery_conf", "[", "'src_dir'", "]", ")" ]
Scrape Matplotlib images.

Parameters
----------
block : tuple
    A tuple containing the (label, content, line_number) of the block.
block_vars : dict
    Dict of block variables.
gallery_conf : dict
    Contains the configuration of Sphinx-Gallery
**kwargs : dict
    Additional keyword arguments to pass to :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``. The ``format`` kwarg in particular is used to set the file extension of the output file (currently only 'png' and 'svg' are supported).

Returns
-------
rst : str
    The ReSTructuredText that will be rendered to HTML containing the images. This is often produced by :func:`figure_rst`.
[ "Scrape", "Matplotlib", "images", "." ]
python
train
41.681818
MIT-LCP/wfdb-python
wfdb/io/download.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L352-L368
def dl_full_file(url, save_file_name):
    """
    Download a file. No checks are performed.

    Parameters
    ----------
    url : str
        The url of the file to download
    save_file_name : str
        The name to save the file as
    """
    response = requests.get(url)
    with open(save_file_name, 'wb') as writefile:
        writefile.write(response.content)
    return
[ "def", "dl_full_file", "(", "url", ",", "save_file_name", ")", ":", "response", "=", "requests", ".", "get", "(", "url", ")", "with", "open", "(", "save_file_name", ",", "'wb'", ")", "as", "writefile", ":", "writefile", ".", "write", "(", "response", ".", "content", ")", "return" ]
Download a file. No checks are performed.

Parameters
----------
url : str
    The url of the file to download
save_file_name : str
    The name to save the file as
[ "Download", "a", "file", ".", "No", "checks", "are", "performed", "." ]
python
train
21.705882
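Calling it is a one-liner, assuming the function and its `requests` import are in scope; because no checks are performed, an error-page response body would be written to disk as-is. The URL and filename below are placeholders:

dl_full_file('https://example.com/records/100.hea', '100.hea')
# For large files, a streamed variant would avoid holding the whole
# response body in memory, e.g. requests.get(url, stream=True).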
pypa/pipenv
pipenv/vendor/urllib3/connection.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/connection.py#L145-L170
def _new_conn(self):
    """ Establish a socket connection and set nodelay settings on it.

    :return: New socket connection.
    """
    extra_kw = {}
    if self.source_address:
        extra_kw['source_address'] = self.source_address

    if self.socket_options:
        extra_kw['socket_options'] = self.socket_options

    try:
        conn = connection.create_connection(
            (self._dns_host, self.port), self.timeout, **extra_kw)
    except SocketTimeout as e:
        raise ConnectTimeoutError(
            self, "Connection to %s timed out. (connect timeout=%s)" %
            (self.host, self.timeout))
    except SocketError as e:
        raise NewConnectionError(
            self, "Failed to establish a new connection: %s" % e)

    return conn
[ "def", "_new_conn", "(", "self", ")", ":", "extra_kw", "=", "{", "}", "if", "self", ".", "source_address", ":", "extra_kw", "[", "'source_address'", "]", "=", "self", ".", "source_address", "if", "self", ".", "socket_options", ":", "extra_kw", "[", "'socket_options'", "]", "=", "self", ".", "socket_options", "try", ":", "conn", "=", "connection", ".", "create_connection", "(", "(", "self", ".", "_dns_host", ",", "self", ".", "port", ")", ",", "self", ".", "timeout", ",", "*", "*", "extra_kw", ")", "except", "SocketTimeout", "as", "e", ":", "raise", "ConnectTimeoutError", "(", "self", ",", "\"Connection to %s timed out. (connect timeout=%s)\"", "%", "(", "self", ".", "host", ",", "self", ".", "timeout", ")", ")", "except", "SocketError", "as", "e", ":", "raise", "NewConnectionError", "(", "self", ",", "\"Failed to establish a new connection: %s\"", "%", "e", ")", "return", "conn" ]
Establish a socket connection and set nodelay settings on it.

:return: New socket connection.
[ "Establish", "a", "socket", "connection", "and", "set", "nodelay", "settings", "on", "it", "." ]
python
train
31.576923
pypa/pipenv
pipenv/vendor/click/_bashcomplete.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/_bashcomplete.py#L172-L191
def get_user_autocompletions(ctx, args, incomplete, cmd_param):
    """
    :param ctx: context associated with the parsed command
    :param args: full list of args
    :param incomplete: the incomplete text to autocomplete
    :param cmd_param: command definition
    :return: all the possible user-specified completions for the param
    """
    results = []
    if isinstance(cmd_param.type, Choice):
        # Choices don't support descriptions.
        results = [(c, None) for c in cmd_param.type.choices
                   if str(c).startswith(incomplete)]
    elif cmd_param.autocompletion is not None:
        dynamic_completions = cmd_param.autocompletion(ctx=ctx, args=args,
                                                       incomplete=incomplete)
        results = [c if isinstance(c, tuple) else (c, None)
                   for c in dynamic_completions]
    return results
[ "def", "get_user_autocompletions", "(", "ctx", ",", "args", ",", "incomplete", ",", "cmd_param", ")", ":", "results", "=", "[", "]", "if", "isinstance", "(", "cmd_param", ".", "type", ",", "Choice", ")", ":", "# Choices don't support descriptions.", "results", "=", "[", "(", "c", ",", "None", ")", "for", "c", "in", "cmd_param", ".", "type", ".", "choices", "if", "str", "(", "c", ")", ".", "startswith", "(", "incomplete", ")", "]", "elif", "cmd_param", ".", "autocompletion", "is", "not", "None", ":", "dynamic_completions", "=", "cmd_param", ".", "autocompletion", "(", "ctx", "=", "ctx", ",", "args", "=", "args", ",", "incomplete", "=", "incomplete", ")", "results", "=", "[", "c", "if", "isinstance", "(", "c", ",", "tuple", ")", "else", "(", "c", ",", "None", ")", "for", "c", "in", "dynamic_completions", "]", "return", "results" ]
:param ctx: context associated with the parsed command
:param args: full list of args
:param incomplete: the incomplete text to autocomplete
:param cmd_param: command definition
:return: all the possible user-specified completions for the param
[ ":", "param", "ctx", ":", "context", "associated", "with", "the", "parsed", "command", ":", "param", "args", ":", "full", "list", "of", "args", ":", "param", "incomplete", ":", "the", "incomplete", "text", "to", "autocomplete", ":", "param", "cmd_param", ":", "command", "definition", ":", "return", ":", "all", "the", "possible", "user", "-", "specified", "completions", "for", "the", "param" ]
python
train
46.4
bitesofcode/projexui
projexui/widgets/xorbquerywidget/xorbqueryentrywidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbquerywidget/xorbqueryentrywidget.py#L107-L124
def assignEditor(self):
    """
    Assigns the editor for this entry based on the plugin.
    """
    plugin = self.currentPlugin()
    column = self.currentColumn()
    value = self.currentValue()

    if not plugin:
        self.setEditor(None)
        return

    self.setUpdatesEnabled(False)
    self.blockSignals(True)
    op = self.uiOperatorDDL.currentText()
    self.setEditor(plugin.createEditor(self, column, op, value))
    self.setUpdatesEnabled(True)
    self.blockSignals(False)
[ "def", "assignEditor", "(", "self", ")", ":", "plugin", "=", "self", ".", "currentPlugin", "(", ")", "column", "=", "self", ".", "currentColumn", "(", ")", "value", "=", "self", ".", "currentValue", "(", ")", "if", "not", "plugin", ":", "self", ".", "setEditor", "(", "None", ")", "return", "self", ".", "setUpdatesEnabled", "(", "False", ")", "self", ".", "blockSignals", "(", "True", ")", "op", "=", "self", ".", "uiOperatorDDL", ".", "currentText", "(", ")", "self", ".", "setEditor", "(", "plugin", ".", "createEditor", "(", "self", ",", "column", ",", "op", ",", "value", ")", ")", "self", ".", "setUpdatesEnabled", "(", "True", ")", "self", ".", "blockSignals", "(", "False", ")" ]
Assigns the editor for this entry based on the plugin.
[ "Assigns", "the", "editor", "for", "this", "entry", "based", "on", "the", "plugin", "." ]
python
train
31.666667
mailgun/talon
talon/signature/learning/helpers.py
https://github.com/mailgun/talon/blob/cdd84563dd329c4f887591807870d10015e0c7a7/talon/signature/learning/helpers.py#L127-L147
def extract_names(sender):
    """Tries to extract sender's names from `From:` header.

    It could extract not only the actual names but e.g.
    the name of the company, parts of email, etc.

    >>> extract_names('Sergey N. Obukhov <[email protected]>')
    ['Sergey', 'Obukhov', 'serobnic']
    >>> extract_names('')
    []
    """
    sender = to_unicode(sender, precise=True)
    # Remove non-alphabetical characters
    sender = "".join([char if char.isalpha() else ' ' for char in sender])
    # Remove too short words and words from "black" list i.e.
    # words like `ru`, `gmail`, `com`, `org`, etc.
    sender = [word for word in sender.split() if len(word) > 1 and
              not word in BAD_SENDER_NAMES]
    # Remove duplicates
    names = list(set(sender))
    return names
[ "def", "extract_names", "(", "sender", ")", ":", "sender", "=", "to_unicode", "(", "sender", ",", "precise", "=", "True", ")", "# Remove non-alphabetical characters", "sender", "=", "\"\"", ".", "join", "(", "[", "char", "if", "char", ".", "isalpha", "(", ")", "else", "' '", "for", "char", "in", "sender", "]", ")", "# Remove too short words and words from \"black\" list i.e.", "# words like `ru`, `gmail`, `com`, `org`, etc.", "sender", "=", "[", "word", "for", "word", "in", "sender", ".", "split", "(", ")", "if", "len", "(", "word", ")", ">", "1", "and", "not", "word", "in", "BAD_SENDER_NAMES", "]", "# Remove duplicates", "names", "=", "list", "(", "set", "(", "sender", ")", ")", "return", "names" ]
Tries to extract sender's names from `From:` header.

It could extract not only the actual names but e.g. the name of the company, parts of email, etc.

>>> extract_names('Sergey N. Obukhov <[email protected]>')
['Sergey', 'Obukhov', 'serobnic']
>>> extract_names('')
[]
[ "Tries", "to", "extract", "sender", "s", "names", "from", "From", ":", "header", "." ]
python
train
36.809524
redcap-tools/PyCap
redcap/project.py
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L384-L420
def filter(self, query, output_fields=None):
    """Query the database and return subject information for those
    who match the query logic

    Parameters
    ----------
    query: Query or QueryGroup
        Query(Group) object to process
    output_fields: list
        The fields desired for matching subjects

    Returns
    -------
    A list of dictionaries whose keys contain at least the default field
    and at most each key passed in with output_fields, each dictionary
    representing a surviving row in the database.
    """
    query_keys = query.fields()
    if not set(query_keys).issubset(set(self.field_names)):
        raise ValueError("One or more query keys not in project keys")
    query_keys.append(self.def_field)
    data = self.export_records(fields=query_keys)
    matches = query.filter(data, self.def_field)
    if matches:
        # if output_fields is empty, we'll download all fields, which is
        # not desired, so we limit download to def_field
        if not output_fields:
            output_fields = [self.def_field]
        # But if caller passed a string and not list, we need to listify
        if isinstance(output_fields, basestring):
            output_fields = [output_fields]
        return self.export_records(records=matches, fields=output_fields)
    else:
        # If there are no matches, then sending an empty list to
        # export_records will actually return all rows, which is not
        # what we want
        return []
[ "def", "filter", "(", "self", ",", "query", ",", "output_fields", "=", "None", ")", ":", "query_keys", "=", "query", ".", "fields", "(", ")", "if", "not", "set", "(", "query_keys", ")", ".", "issubset", "(", "set", "(", "self", ".", "field_names", ")", ")", ":", "raise", "ValueError", "(", "\"One or more query keys not in project keys\"", ")", "query_keys", ".", "append", "(", "self", ".", "def_field", ")", "data", "=", "self", ".", "export_records", "(", "fields", "=", "query_keys", ")", "matches", "=", "query", ".", "filter", "(", "data", ",", "self", ".", "def_field", ")", "if", "matches", ":", "# if output_fields is empty, we'll download all fields, which is", "# not desired, so we limit download to def_field", "if", "not", "output_fields", ":", "output_fields", "=", "[", "self", ".", "def_field", "]", "# But if caller passed a string and not list, we need to listify", "if", "isinstance", "(", "output_fields", ",", "basestring", ")", ":", "output_fields", "=", "[", "output_fields", "]", "return", "self", ".", "export_records", "(", "records", "=", "matches", ",", "fields", "=", "output_fields", ")", "else", ":", "# If there are no matches, then sending an empty list to", "# export_records will actually return all rows, which is not", "# what we want", "return", "[", "]" ]
Query the database and return subject information for those
        who match the query logic

        Parameters
        ----------
        query: Query or QueryGroup
            Query(Group) object to process
        output_fields: list
            The fields desired for matching subjects

        Returns
        -------
        A list of dictionaries whose keys contain at least the default field
        and at most each key passed in with output_fields, each dictionary
        representing a surviving row in the database.
[ "Query", "the", "database", "and", "return", "subject", "information", "for", "those", "who", "match", "the", "query", "logic" ]
python
train
43.081081
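A hedged usage sketch for the filter() method above; the API URL, token, and field names are placeholders, and the Query constructor signature is assumed rather than confirmed:

from redcap import Project, Query  # Query import path assumed

project = Project('https://redcap.example.org/api/', '0123456789ABCDEF')  # placeholders
adults = Query('age', {'ge': 18}, 'number')                               # assumed signature
rows = project.filter(adults, output_fields=['age', 'study_id'])
# rows: list of dicts, each containing at least the project's def_field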
konstantinstadler/pymrio
pymrio/core/mriosystem.py
https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L83-L124
def reset_full(self, force=False, _meta=None):
        """ Remove all accounts which can be recalculated based on Z, Y, F, FY

        Parameters
        ----------

        force: boolean, optional
            If True, reset to flows although the system cannot be
            recalculated. Default: False

        _meta: MRIOMetaData, optional
            Metadata handler for logging, optional. Internal

        """
        # Attributes to keep must be defined in the init: __basic__
        strwarn = None
        for df in self.__basic__:
            if (getattr(self, df)) is None:
                if force:
                    strwarn = ("Reset system warning - Recalculation after "
                               "reset not possible "
                               "because {} missing".format(df))
                    warnings.warn(strwarn, ResetWarning)

                else:
                    raise ResetError("Too few tables to recalculate the "
                                     "system after reset ({} missing) "
                                     "- reset can be forced by passing "
                                     "'force=True'".format(df))

        if _meta:
            _meta._add_modify("Reset system to Z and Y")
            if strwarn:
                _meta._add_modify(strwarn)

        [setattr(self, key, None)
         for key in self.get_DataFrame(
            data=False,
            with_unit=False,
            with_population=False)
         if key not in self.__basic__]
        return self
[ "def", "reset_full", "(", "self", ",", "force", "=", "False", ",", "_meta", "=", "None", ")", ":", "# Attriubtes to keep must be defined in the init: __basic__", "strwarn", "=", "None", "for", "df", "in", "self", ".", "__basic__", ":", "if", "(", "getattr", "(", "self", ",", "df", ")", ")", "is", "None", ":", "if", "force", ":", "strwarn", "=", "(", "\"Reset system warning - Recalculation after \"", "\"reset not possible \"", "\"because {} missing\"", ".", "format", "(", "df", ")", ")", "warnings", ".", "warn", "(", "strwarn", ",", "ResetWarning", ")", "else", ":", "raise", "ResetError", "(", "\"To few tables to recalculate the \"", "\"system after reset ({} missing) \"", "\"- reset can be forced by passing \"", "\"'force=True')\"", ".", "format", "(", "df", ")", ")", "if", "_meta", ":", "_meta", ".", "_add_modify", "(", "\"Reset system to Z and Y\"", ")", "if", "strwarn", ":", "_meta", ".", "_add_modify", "(", "strwarn", ")", "[", "setattr", "(", "self", ",", "key", ",", "None", ")", "for", "key", "in", "self", ".", "get_DataFrame", "(", "data", "=", "False", ",", "with_unit", "=", "False", ",", "with_population", "=", "False", ")", "if", "key", "not", "in", "self", ".", "__basic__", "]", "return", "self" ]
Remove all accounts which can be recalculated based on Z, Y, F, FY

        Parameters
        ----------

        force: boolean, optional
            If True, reset to flows although the system cannot be
            recalculated. Default: False

        _meta: MRIOMetaData, optional
            Metadata handler for logging, optional. Internal
[ "Remove", "all", "accounts", "which", "can", "be", "recalculated", "based", "on", "Z", "Y", "F", "FY" ]
python
train
35.880952
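A short usage sketch against the small test system that ships with pymrio; load_test() and calc_all() are part of the public pymrio API, and the comment on what survives is taken from the docstring above:

import pymrio

io = pymrio.load_test()   # small example MRIO system bundled with pymrio
io.calc_all()             # derive A, L, multipliers, ... from Z and Y
io.reset_full()           # drop everything recomputable; Z, Y, F, FY survive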
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L200-L204
def is_unprocessed_local_replica(pid): """Is local replica with status "queued".""" return d1_gmn.app.models.LocalReplica.objects.filter( pid__did=pid, info__status__status='queued' ).exists()
[ "def", "is_unprocessed_local_replica", "(", "pid", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "LocalReplica", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ",", "info__status__status", "=", "'queued'", ")", ".", "exists", "(", ")" ]
Is local replica with status "queued".
[ "Is", "local", "replica", "with", "status", "queued", "." ]
python
train
41.6
zabuldon/teslajsonpy
teslajsonpy/vehicle.py
https://github.com/zabuldon/teslajsonpy/blob/673ecdb5c9483160fb1b97e30e62f2c863761c39/teslajsonpy/vehicle.py#L59-L65
def assumed_state(self): # pylint: disable=protected-access """Return whether the data is from an online vehicle.""" return (not self._controller.car_online[self.id()] and (self._controller._last_update_time[self.id()] - self._controller._last_wake_up_time[self.id()] > self._controller.update_interval))
[ "def", "assumed_state", "(", "self", ")", ":", "# pylint: disable=protected-access", "return", "(", "not", "self", ".", "_controller", ".", "car_online", "[", "self", ".", "id", "(", ")", "]", "and", "(", "self", ".", "_controller", ".", "_last_update_time", "[", "self", ".", "id", "(", ")", "]", "-", "self", ".", "_controller", ".", "_last_wake_up_time", "[", "self", ".", "id", "(", ")", "]", ">", "self", ".", "_controller", ".", "update_interval", ")", ")" ]
Return whether the data is from an online vehicle.
[ "Return", "whether", "the", "data", "is", "from", "an", "online", "vehicle", "." ]
python
train
53.142857
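The property encodes a simple staleness rule: report "assumed" data only when the car is offline and the last data refresh predates the last wake-up by more than one polling interval. The same rule as a standalone function:

def is_assumed(car_online, last_update, last_wake_up, update_interval):
    # True when the vehicle is offline and our cached data predates the
    # most recent wake-up by more than one polling interval
    return (not car_online) and (last_update - last_wake_up > update_interval)

print(is_assumed(False, last_update=1000.0, last_wake_up=900.0, update_interval=60.0))  # True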
bsolomon1124/pyfinance
pyfinance/ols.py
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L421-L426
def _std_err(self): """Standard error of the estimate (SEE). A scalar. For standard errors of parameters, see _se_all, se_alpha, and se_beta. """ return np.sqrt(np.sum(np.square(self._resids), axis=1) / self._df_err)
[ "def", "_std_err", "(", "self", ")", ":", "return", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "np", ".", "square", "(", "self", ".", "_resids", ")", ",", "axis", "=", "1", ")", "/", "self", ".", "_df_err", ")" ]
Standard error of the estimate (SEE). A scalar. For standard errors of parameters, see _se_all, se_alpha, and se_beta.
[ "Standard", "error", "of", "the", "estimate", "(", "SEE", ")", ".", "A", "scalar", ".", "For", "standard", "errors", "of", "parameters", "see", "_se_all", "se_alpha", "and", "se_beta", "." ]
python
train
41.666667
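The computation is SEE = sqrt(SSR / df_err), the residual sum of squares over error degrees of freedom. A single-regression check with numpy; k = 2 estimated parameters is an assumption made for this example:

import numpy as np

resids = np.array([0.5, -1.0, 0.25, 0.75])
n, k = resids.size, 2                              # assumed: intercept + one slope
df_err = n - k
see = np.sqrt(np.sum(np.square(resids)) / df_err)  # sqrt(1.875 / 2)
print(round(float(see), 4))                        # 0.9682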
libChEBI/libChEBIpy
libchebipy/_parsers.py
https://github.com/libChEBI/libChEBIpy/blob/89f223a91f518619d5e3910070d283adcac1626e/libchebipy/_parsers.py#L91-L96
def get_charge(chebi_id): '''Returns charge''' if len(__CHARGES) == 0: __parse_chemical_data() return __CHARGES[chebi_id] if chebi_id in __CHARGES else float('NaN')
[ "def", "get_charge", "(", "chebi_id", ")", ":", "if", "len", "(", "__CHARGES", ")", "==", "0", ":", "__parse_chemical_data", "(", ")", "return", "__CHARGES", "[", "chebi_id", "]", "if", "chebi_id", "in", "__CHARGES", "else", "float", "(", "'NaN'", ")" ]
Returns charge
[ "Returns", "charge" ]
python
train
30
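get_charge() is an instance of a lazy module-level cache: parse the data file on first access, answer from a dict afterwards, and return NaN for unknown ids. The pattern in isolation (the parse step is a stand-in for the real ChEBI file read):

_CHARGES = {}

def _parse_chemical_data():
    # stand-in for reading the real ChEBI data file
    _CHARGES.update({15903: 0, 29101: 1})

def get_charge(chebi_id):
    if len(_CHARGES) == 0:
        _parse_chemical_data()
    return _CHARGES[chebi_id] if chebi_id in _CHARGES else float('NaN')

print(get_charge(29101), get_charge(12345))  # 1 nan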
RI-imaging/qpsphere
qpsphere/models/_bhfield/wrap.py
https://github.com/RI-imaging/qpsphere/blob/3cfa0e9fb8e81be8c820abbeccd47242e7972ac1/qpsphere/models/_bhfield/wrap.py#L224-L303
def run_simulation(wdir, arp=True, **kwargs):
    """
    Example
    -------
    100-nm silica sphere with 10-nm thick Ag coating,
    embedded in water; arprec 20 digits;
    illuminated with YAG (1064nm);
    scan xz plane (21x21, +-200nm)

    bhfield-arp-db.exe mpdigit wl r_core r_coat
    n_grid_x xspan_min xspan_max
    n_grid_y yspan_min yspan_max
    n_grid_z zspan_min zspan_max
    case Kreibig [n_med n_core k_core n_coat k_coat (case=other)]

    bhfield-arp-db.exe 20 1.064 0.050 0.060 21 -0.2 0.2 1 0 0 21 -0.2 0.2
    other 0 1.3205 1.53413 0 0.565838 7.23262

    Explanation of parameters
    -------------------------

    mpdigit:
        arprec's number of precision digits;
        increase it to overcome round-off errors
    wl[um]:
        light wavelength in vacuum
    r_core[um], r_coat[um]:
        core & coat radii
    n_grid_x xspan_min[um] xspan_max[um]:
        number & span of grid points for field computation; x span
    n_grid_y yspan_min[um] yspan_max[um]:
        y span
    n_grid_z zspan_min[um] zspan_max[um]:
        z span
    Kreibig:
        Kreibig mean free path correction for Ag (0.0 - 1.0)
    case:
        nanoshell/liposome/HPC/barber/other
    n_med n_core k_core n_coat k_coat (case=other only):
        refractive indices of medium (real), core & coat (n, k)

    If `case=other`, complex refractive indices
    (n, k at the particular wavelength) must be specified.
    Otherwise (case = nanoshell etc) the medium/core/coat materials are
    predefined and the n,k values are taken from the data
    file (Ag_palik.nk etc). The latter reflects our own interest and is
    intended for use in our lab, so general users may not find it useful :-)
    """
    wdir = pathlib.Path(wdir)
    cmd = "{pathbhfield} {mpdigit} {wl:f} {r_core:f} {r_coat:f} " \
          + "{n_grid_x:d} {xspan_min:f} {xspan_max:f} " \
          + "{n_grid_y:d} {yspan_min:f} {yspan_max:f} " \
          + "{n_grid_z:d} {zspan_min:f} {zspan_max:f} " \
          + "{case} {Kreibig:f} {n_med:f} {n_core:f} {k_core:f} " \
          + "{n_coat:f} {k_coat:f}"

    old_dir = pathlib.Path.cwd()
    os.chdir(wdir)

    kwargs["pathbhfield"] = get_binary(arp=arp)

    if arp:
        kwargs["mpdigit"] = 16
    else:
        kwargs["mpdigit"] = ""

    # run simulation with kwargs
    sp.check_output(cmd.format(**kwargs), shell=True)

    # Go back to original directory before checking (checking might fail)
    os.chdir(old_dir)

    # Check bhdebug.txt to make sure that you specify enough digits to
    # overcome roundoff errors.
    check_simulation(wdir)
[ "def", "run_simulation", "(", "wdir", ",", "arp", "=", "True", ",", "*", "*", "kwargs", ")", ":", "wdir", "=", "pathlib", ".", "Path", "(", "wdir", ")", "cmd", "=", "\"{pathbhfield} {mpdigit} {wl:f} {r_core:f} {r_coat:f} \"", "+", "\"{n_grid_x:d} {xspan_min:f} {xspan_max:f} \"", "+", "\"{n_grid_y:d} {yspan_min:f} {yspan_max:f} \"", "+", "\"{n_grid_z:d} {zspan_min:f} {zspan_max:f} \"", "+", "\"{case} {Kreibig:f} {n_med:f} {n_core:f} {k_core:f} \"", "+", "\"{n_coat:f} {k_coat:f}\"", "old_dir", "=", "pathlib", ".", "Path", ".", "cwd", "(", ")", "os", ".", "chdir", "(", "wdir", ")", "kwargs", "[", "\"pathbhfield\"", "]", "=", "get_binary", "(", "arp", "=", "arp", ")", "if", "arp", ":", "kwargs", "[", "\"mpdigit\"", "]", "=", "16", "else", ":", "kwargs", "[", "\"mpdigit\"", "]", "=", "\"\"", "# run simulation with kwargs", "sp", ".", "check_output", "(", "cmd", ".", "format", "(", "*", "*", "kwargs", ")", ",", "shell", "=", "True", ")", "# Go back to orgignal directory before checking (checking might fail)", "os", ".", "chdir", "(", "old_dir", ")", "# Check bhdebug.txt to make sure that you specify enough digits to", "# overcome roundoff errors.", "check_simulation", "(", "wdir", ")" ]
Example ------- 100-nm silica sphere with 10-nm thick Ag coating, embedded in water; arprec 20 digits; illuminated with YAG (1064nm); scan xz plane (21x21, +-200nm) bhfield-arp-db.exe mpdigit wl r_core r_coat n_grid_x xspan_min xspan_max n_grid_y yspan_min yspan_max n_grid_z zspan_min zspan_max case Kreibig [n_med n_core k_core n_coat k_coat (case=other)] bhfield-arp-db.exe 20 1.064 0.050 0.060 21 -0.2 0.2 1 0 0 21 -0.2 0.2 other 0 1.3205 1.53413 0 0.565838 7.23262 Explanation of parameters ------------------------- mpdigit: arprec's number of precision digits; increase it to overcome round-off errors wl[um]: light wavelength in vacuum r_core[um], r_coat[um]: core & coat radii n_grid_x xspan_min[um] xspan_max[um]: number & span of grid points for field computation; x span n_grid_y yspan_min[um] yspan_max[um]: y span n_grid_z zspan_min[um] zspan_max[um]: z span Kreibig: Kreibig mean free path correction for Ag (0.0 - 1.0) case: nanoshell/liposome/HPC/barber/other n_med n_core k_core n_coat k_coat (case=other only): refractive indices of medium (real), core & coat (n, k) If `case=other`, complex refractive indices (n, k at the particular wavelength) must be specified. Otherwise (case = nanoshell etc) the medium/core/coat materials are predefined and the n,k values are taken from the data file (Ag_palik.nk etc). The latter reflects our own interest and is intended for use in our lab, so general users may not find it useful :-)
[ "Example", "-------", "100", "-", "nm", "silica", "sphere", "with", "10", "-", "nm", "thick", "Ag", "coating", "embedded", "in", "water", ";", "arprec", "20", "digits", ";", "illuminated", "with", "YAG", "(", "1064nm", ")", ";", "scan", "xz", "plane", "(", "21x21", "+", "-", "200nm", ")" ]
python
train
34.0625
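Mapping the command line from the docstring onto the keyword interface, a call might look like the following; wdir is a placeholder, and the numeric values are copied from the docstring's silica/Ag example:

run_simulation(
    "/tmp/bhfield",                      # placeholder working directory
    arp=True,
    wl=1.064, r_core=0.050, r_coat=0.060,
    n_grid_x=21, xspan_min=-0.2, xspan_max=0.2,
    n_grid_y=1, yspan_min=0, yspan_max=0,
    n_grid_z=21, zspan_min=-0.2, zspan_max=0.2,
    case="other", Kreibig=0,
    n_med=1.3205, n_core=1.53413, k_core=0,
    n_coat=0.565838, k_coat=7.23262,
)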
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L485-L520
def load(self: T, **kwargs) -> T: """Manually trigger loading of this dataset's data from disk or a remote source into memory and return this dataset. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. See Also -------- dask.array.compute """ # access .data to coerce everything to numpy or dask arrays lazy_data = {k: v._data for k, v in self.variables.items() if isinstance(v._data, dask_array_type)} if lazy_data: import dask.array as da # evaluate all the dask arrays simultaneously evaluated_data = da.compute(*lazy_data.values(), **kwargs) for k, data in zip(lazy_data, evaluated_data): self.variables[k].data = data # load everything else sequentially for k, v in self.variables.items(): if k not in lazy_data: v.load() return self
[ "def", "load", "(", "self", ":", "T", ",", "*", "*", "kwargs", ")", "->", "T", ":", "# access .data to coerce everything to numpy or dask arrays", "lazy_data", "=", "{", "k", ":", "v", ".", "_data", "for", "k", ",", "v", "in", "self", ".", "variables", ".", "items", "(", ")", "if", "isinstance", "(", "v", ".", "_data", ",", "dask_array_type", ")", "}", "if", "lazy_data", ":", "import", "dask", ".", "array", "as", "da", "# evaluate all the dask arrays simultaneously", "evaluated_data", "=", "da", ".", "compute", "(", "*", "lazy_data", ".", "values", "(", ")", ",", "*", "*", "kwargs", ")", "for", "k", ",", "data", "in", "zip", "(", "lazy_data", ",", "evaluated_data", ")", ":", "self", ".", "variables", "[", "k", "]", ".", "data", "=", "data", "# load everything else sequentially", "for", "k", ",", "v", "in", "self", ".", "variables", ".", "items", "(", ")", ":", "if", "k", "not", "in", "lazy_data", ":", "v", ".", "load", "(", ")", "return", "self" ]
Manually trigger loading of this dataset's data from disk or a remote source into memory and return this dataset. Normally, it should not be necessary to call this method in user code, because all xarray functions should either work on deferred data or load data automatically. However, this method can be necessary when working with many file objects on disk. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.array.compute``. See Also -------- dask.array.compute
[ "Manually", "trigger", "loading", "of", "this", "dataset", "s", "data", "from", "disk", "or", "a", "remote", "source", "into", "memory", "and", "return", "this", "dataset", "." ]
python
train
35.75
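Typical use, assuming a dataset opened lazily with dask chunks (the file path is a placeholder):

import xarray as xr

ds = xr.open_dataset("data.nc", chunks={"time": 100})  # placeholder path
ds.load()  # evaluate all dask-backed variables in one compute() pass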
jmcarp/flask-apispec
flask_apispec/annotations.py
https://github.com/jmcarp/flask-apispec/blob/d8cb658fa427f051568e58d6af201b8e9924c325/flask_apispec/annotations.py#L8-L38
def use_kwargs(args, locations=None, inherit=None, apply=None, **kwargs): """Inject keyword arguments from the specified webargs arguments into the decorated view function. Usage: .. code-block:: python from marshmallow import fields @use_kwargs({'name': fields.Str(), 'category': fields.Str()}) def get_pets(**kwargs): return Pet.query.filter_by(**kwargs).all() :param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>` objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a request and returns a :class:`Schema <marshmallow.Schema>` :param locations: Default request locations to parse :param inherit: Inherit args from parent classes :param apply: Parse request with specified args """ kwargs.update({'locations': locations}) def wrapper(func): options = { 'args': args, 'kwargs': kwargs, } annotate(func, 'args', [options], inherit=inherit, apply=apply) return activate(func) return wrapper
[ "def", "use_kwargs", "(", "args", ",", "locations", "=", "None", ",", "inherit", "=", "None", ",", "apply", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'locations'", ":", "locations", "}", ")", "def", "wrapper", "(", "func", ")", ":", "options", "=", "{", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", ",", "}", "annotate", "(", "func", ",", "'args'", ",", "[", "options", "]", ",", "inherit", "=", "inherit", ",", "apply", "=", "apply", ")", "return", "activate", "(", "func", ")", "return", "wrapper" ]
Inject keyword arguments from the specified webargs arguments into the decorated view function. Usage: .. code-block:: python from marshmallow import fields @use_kwargs({'name': fields.Str(), 'category': fields.Str()}) def get_pets(**kwargs): return Pet.query.filter_by(**kwargs).all() :param args: Mapping of argument names to :class:`Field <marshmallow.fields.Field>` objects, :class:`Schema <marshmallow.Schema>`, or a callable which accepts a request and returns a :class:`Schema <marshmallow.Schema>` :param locations: Default request locations to parse :param inherit: Inherit args from parent classes :param apply: Parse request with specified args
[ "Inject", "keyword", "arguments", "from", "the", "specified", "webargs", "arguments", "into", "the", "decorated", "view", "function", "." ]
python
train
34.645161
sdispater/poetry
poetry/masonry/publishing/uploader.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/masonry/publishing/uploader.py#L256-L282
def _register(self, session, url): """ Register a package to a repository. """ dist = self._poetry.file.parent / "dist" file = dist / "{}-{}.tar.gz".format( self._package.name, normalize_version(self._package.version.text) ) if not file.exists(): raise RuntimeError('"{0}" does not exist.'.format(file.name)) data = self.post_data(file) data.update({":action": "submit", "protocol_version": "1"}) data_to_send = self._prepare_data(data) encoder = MultipartEncoder(data_to_send) resp = session.post( url, data=encoder, allow_redirects=False, headers={"Content-Type": encoder.content_type}, ) resp.raise_for_status() return resp
[ "def", "_register", "(", "self", ",", "session", ",", "url", ")", ":", "dist", "=", "self", ".", "_poetry", ".", "file", ".", "parent", "/", "\"dist\"", "file", "=", "dist", "/", "\"{}-{}.tar.gz\"", ".", "format", "(", "self", ".", "_package", ".", "name", ",", "normalize_version", "(", "self", ".", "_package", ".", "version", ".", "text", ")", ")", "if", "not", "file", ".", "exists", "(", ")", ":", "raise", "RuntimeError", "(", "'\"{0}\" does not exist.'", ".", "format", "(", "file", ".", "name", ")", ")", "data", "=", "self", ".", "post_data", "(", "file", ")", "data", ".", "update", "(", "{", "\":action\"", ":", "\"submit\"", ",", "\"protocol_version\"", ":", "\"1\"", "}", ")", "data_to_send", "=", "self", ".", "_prepare_data", "(", "data", ")", "encoder", "=", "MultipartEncoder", "(", "data_to_send", ")", "resp", "=", "session", ".", "post", "(", "url", ",", "data", "=", "encoder", ",", "allow_redirects", "=", "False", ",", "headers", "=", "{", "\"Content-Type\"", ":", "encoder", ".", "content_type", "}", ",", ")", "resp", ".", "raise_for_status", "(", ")", "return", "resp" ]
Register a package to a repository.
[ "Register", "a", "package", "to", "a", "repository", "." ]
python
train
29.518519
dlintott/gns3-converter
gns3converter/node.py
https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/node.py#L402-L410
def set_description(self): """ Set the node description """ if self.device_info['type'] == 'Router': self.node['description'] = '%s %s' % (self.device_info['type'], self.device_info['model']) else: self.node['description'] = self.device_info['desc']
[ "def", "set_description", "(", "self", ")", ":", "if", "self", ".", "device_info", "[", "'type'", "]", "==", "'Router'", ":", "self", ".", "node", "[", "'description'", "]", "=", "'%s %s'", "%", "(", "self", ".", "device_info", "[", "'type'", "]", ",", "self", ".", "device_info", "[", "'model'", "]", ")", "else", ":", "self", ".", "node", "[", "'description'", "]", "=", "self", ".", "device_info", "[", "'desc'", "]" ]
Set the node description
[ "Set", "the", "node", "description" ]
python
train
39.444444
loads/molotov
molotov/util.py
https://github.com/loads/molotov/blob/bd2c94e7f250e1fbb21940f02c68b4437655bc11/molotov/util.py#L203-L208
def json_request(endpoint, verb='GET', session_options=None, **options): """Like :func:`molotov.request` but extracts json from the response. """ req = functools.partial(_request, endpoint, verb, session_options, json=True, **options) return _run_in_fresh_loop(req)
[ "def", "json_request", "(", "endpoint", ",", "verb", "=", "'GET'", ",", "session_options", "=", "None", ",", "*", "*", "options", ")", ":", "req", "=", "functools", ".", "partial", "(", "_request", ",", "endpoint", ",", "verb", ",", "session_options", ",", "json", "=", "True", ",", "*", "*", "options", ")", "return", "_run_in_fresh_loop", "(", "req", ")" ]
Like :func:`molotov.request` but extracts json from the response.
[ "Like", ":", "func", ":", "molotov", ".", "request", "but", "extracts", "json", "from", "the", "response", "." ]
python
train
50.666667
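A hedged usage sketch; the endpoint is a placeholder, and the result's key names are assumed from molotov's request utilities of the same era:

from molotov.util import json_request

res = json_request("http://localhost:8080/info")  # placeholder endpoint
print(res["status"], res["content"])              # key names assumed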
Jammy2211/PyAutoLens
autolens/lens/plane.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/lens/plane.py#L490-L493
def trace_to_next_plane(self): """Trace the positions to the next plane.""" return list(map(lambda positions, deflections: np.subtract(positions, deflections), self.positions, self.deflections))
[ "def", "trace_to_next_plane", "(", "self", ")", ":", "return", "list", "(", "map", "(", "lambda", "positions", ",", "deflections", ":", "np", ".", "subtract", "(", "positions", ",", "deflections", ")", ",", "self", ".", "positions", ",", "self", ".", "deflections", ")", ")" ]
Trace the positions to the next plane.
[ "Trace", "the", "positions", "to", "the", "next", "plane", "." ]
python
valid
57.75
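The trace step is a per-plane element-wise subtraction; a standalone numpy check of the same map/subtract pattern:

import numpy as np

positions = [np.array([[1.0, 1.0], [2.0, 0.5]])]
deflections = [np.array([[0.1, 0.2], [0.0, 0.5]])]
traced = list(map(lambda p, d: np.subtract(p, d), positions, deflections))
print(traced[0])  # [[0.9 0.8]
                  #  [2.  0. ]]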
EUDAT-B2SAFE/B2HANDLE
b2handle/handlesystemconnector.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handlesystemconnector.py#L447-L482
def check_if_username_exists(self, username):
        '''
        Check if the username handle exists.

        :param username: The username, in the form index:prefix/suffix
        :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
        :raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
        :return: True. If it does not exist, an exception is raised.

        *Note:* Only the existence of the handle is verified. The existence or
        validity of the index is not checked, because entries containing
        a key are hidden anyway.
        '''
        LOGGER.debug('check_if_username_exists...')

        _, handle = b2handle.utilhandle.remove_index_from_handle(username)

        resp = self.send_handle_get_request(handle)
        resp_content = decoded_response(resp)
        if b2handle.hsresponses.does_handle_exist(resp):
            handlerecord_json = json.loads(resp_content)
            if not handlerecord_json['handle'] == handle:
                raise GenericHandleError(
                    operation='Checking if username exists',
                    handle=handle,
                    response=resp,
                    msg='The check returned a different handle than was asked for.'
                )
            return True
        elif b2handle.hsresponses.handle_not_found(resp):
            msg = 'The username handle does not exist'
            raise HandleNotFoundException(handle=handle, msg=msg, response=resp)
        else:
            op = 'checking if handle exists'
            msg = 'Checking if username exists went wrong'
            raise GenericHandleError(operation=op, handle=handle, response=resp, msg=msg)
[ "def", "check_if_username_exists", "(", "self", ",", "username", ")", ":", "LOGGER", ".", "debug", "(", "'check_if_username_exists...'", ")", "_", ",", "handle", "=", "b2handle", ".", "utilhandle", ".", "remove_index_from_handle", "(", "username", ")", "resp", "=", "self", ".", "send_handle_get_request", "(", "handle", ")", "resp_content", "=", "decoded_response", "(", "resp", ")", "if", "b2handle", ".", "hsresponses", ".", "does_handle_exist", "(", "resp", ")", ":", "handlerecord_json", "=", "json", ".", "loads", "(", "resp_content", ")", "if", "not", "handlerecord_json", "[", "'handle'", "]", "==", "handle", ":", "raise", "GenericHandleError", "(", "operation", "=", "'Checking if username exists'", ",", "handle", "=", "handle", ",", "reponse", "=", "resp", ",", "msg", "=", "'The check returned a different handle than was asked for.'", ")", "return", "True", "elif", "b2handle", ".", "hsresponses", ".", "handle_not_found", "(", "resp", ")", ":", "msg", "=", "'The username handle does not exist'", "raise", "HandleNotFoundException", "(", "handle", "=", "handle", ",", "msg", "=", "msg", ",", "response", "=", "resp", ")", "else", ":", "op", "=", "'checking if handle exists'", "msg", "=", "'Checking if username exists went wrong'", "raise", "GenericHandleError", "(", "operation", "=", "op", ",", "handle", "=", "handle", ",", "response", "=", "resp", ",", "msg", "=", "msg", ")" ]
Check if the username handle exists.

        :param username: The username, in the form index:prefix/suffix
        :raises: :exc:`~b2handle.handleexceptions.HandleNotFoundException`
        :raises: :exc:`~b2handle.handleexceptions.GenericHandleError`
        :return: True. If it does not exist, an exception is raised.

        *Note:* Only the existence of the handle is verified. The existence or
        validity of the index is not checked, because entries containing
        a key are hidden anyway.
[ "Check", "if", "the", "username", "handles", "exists", "." ]
python
train
45.861111
Alignak-monitoring/alignak
alignak/objects/host.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/host.py#L319-L350
def is_correct(self):
        """Check if this object configuration is correct ::

        * Check our own specific properties
        * Call our parent class is_correct checker

        :return: True if the configuration is correct, otherwise False
        :rtype: bool
        """
        state = True

        # Internal checks before executing inherited function...
        cls = self.__class__
        if hasattr(self, 'host_name'):
            for char in cls.illegal_object_name_chars:
                if char in self.host_name:
                    self.add_error("[%s::%s] host_name contains an illegal character: %s"
                                   % (self.my_type, self.get_name(), char))
                    state = False

        # Fred: do not alert about missing check_command for a host... this is because 1/ it is
        # very verbose if hosts are not checked and 2/ because it is the Nagios default behavior
        # if not self.check_command:
        #     self.add_warning("[%s::%s] has no defined check command"
        #                      % (self.my_type, self.get_name()))

        if self.notifications_enabled and not self.contacts:
            self.add_warning("[%s::%s] notifications are enabled but no contacts nor "
                             "contact_groups property is defined for this host"
                             % (self.my_type, self.get_name()))

        return super(Host, self).is_correct() and state
[ "def", "is_correct", "(", "self", ")", ":", "state", "=", "True", "# Internal checks before executing inherited function...", "cls", "=", "self", ".", "__class__", "if", "hasattr", "(", "self", ",", "'host_name'", ")", ":", "for", "char", "in", "cls", ".", "illegal_object_name_chars", ":", "if", "char", "in", "self", ".", "host_name", ":", "self", ".", "add_error", "(", "\"[%s::%s] host_name contains an illegal character: %s\"", "%", "(", "self", ".", "my_type", ",", "self", ".", "get_name", "(", ")", ",", "char", ")", ")", "state", "=", "False", "# Fred: do not alert about missing check_command for an host... this because 1/ it is", "# very verbose if hosts are not checked and 2/ because it is the Nagios default behavior", "# if not self.check_command:", "# self.add_warning(\"[%s::%s] has no defined check command\"", "# % (self.my_type, self.get_name()))", "if", "self", ".", "notifications_enabled", "and", "not", "self", ".", "contacts", ":", "self", ".", "add_warning", "(", "\"[%s::%s] notifications are enabled but no contacts nor \"", "\"contact_groups property is defined for this host\"", "%", "(", "self", ".", "my_type", ",", "self", ".", "get_name", "(", ")", ")", ")", "return", "super", "(", "Host", ",", "self", ")", ".", "is_correct", "(", ")", "and", "state" ]
Check if this object configuration is correct :: * Check our own specific properties * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False :rtype: bool
[ "Check", "if", "this", "object", "configuration", "is", "correct", "::" ]
python
train
44.375
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py#L597-L604
def AddMethod(self, function, name=None): """ Adds the specified function as a method of this construction environment with the specified name. If the name is omitted, the default name is the name of the function itself. """ method = MethodWrapper(self, function, name) self.added_methods.append(method)
[ "def", "AddMethod", "(", "self", ",", "function", ",", "name", "=", "None", ")", ":", "method", "=", "MethodWrapper", "(", "self", ",", "function", ",", "name", ")", "self", ".", "added_methods", ".", "append", "(", "method", ")" ]
Adds the specified function as a method of this construction environment with the specified name. If the name is omitted, the default name is the name of the function itself.
[ "Adds", "the", "specified", "function", "as", "a", "method", "of", "this", "construction", "environment", "with", "the", "specified", "name", ".", "If", "the", "name", "is", "omitted", "the", "default", "name", "is", "the", "name", "of", "the", "function", "itself", "." ]
python
train
44.125
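Typical use from an SConstruct (where Environment is predefined), following the docstring above; the method body is illustrative:

def build_widget(env, target, source):
    # illustrative: delegate to a builtin builder
    return env.Program(target, source)

env = Environment()          # SCons construction environment
env.AddMethod(build_widget)  # default name: the function's own name
env.build_widget('widget', ['widget.c'])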
gitpython-developers/smmap
smmap/mman.py
https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L225-L233
def fd(self): """:return: file descriptor used to create the underlying mapping. **Note:** it is not required to be valid anymore :raise ValueError: if the mapping was not created by a file descriptor""" if isinstance(self._rlist.path_or_fd(), string_types()): raise ValueError("File descriptor queried although mapping was generated from path") # END handle type return self._rlist.path_or_fd()
[ "def", "fd", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "_rlist", ".", "path_or_fd", "(", ")", ",", "string_types", "(", ")", ")", ":", "raise", "ValueError", "(", "\"File descriptor queried although mapping was generated from path\"", ")", "# END handle type", "return", "self", ".", "_rlist", ".", "path_or_fd", "(", ")" ]
:return: file descriptor used to create the underlying mapping. **Note:** it is not required to be valid anymore :raise ValueError: if the mapping was not created by a file descriptor
[ ":", "return", ":", "file", "descriptor", "used", "to", "create", "the", "underlying", "mapping", "." ]
python
train
49.777778
FlorianRhiem/pyGLFW
glfw/__init__.py
https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L632-L646
def get_version(): """ Retrieves the version of the GLFW library. Wrapper for: void glfwGetVersion(int* major, int* minor, int* rev); """ major_value = ctypes.c_int(0) major = ctypes.pointer(major_value) minor_value = ctypes.c_int(0) minor = ctypes.pointer(minor_value) rev_value = ctypes.c_int(0) rev = ctypes.pointer(rev_value) _glfw.glfwGetVersion(major, minor, rev) return major_value.value, minor_value.value, rev_value.value
[ "def", "get_version", "(", ")", ":", "major_value", "=", "ctypes", ".", "c_int", "(", "0", ")", "major", "=", "ctypes", ".", "pointer", "(", "major_value", ")", "minor_value", "=", "ctypes", ".", "c_int", "(", "0", ")", "minor", "=", "ctypes", ".", "pointer", "(", "minor_value", ")", "rev_value", "=", "ctypes", ".", "c_int", "(", "0", ")", "rev", "=", "ctypes", ".", "pointer", "(", "rev_value", ")", "_glfw", ".", "glfwGetVersion", "(", "major", ",", "minor", ",", "rev", ")", "return", "major_value", ".", "value", ",", "minor_value", ".", "value", ",", "rev_value", ".", "value" ]
Retrieves the version of the GLFW library. Wrapper for: void glfwGetVersion(int* major, int* minor, int* rev);
[ "Retrieves", "the", "version", "of", "the", "GLFW", "library", "." ]
python
train
31.533333
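get_version() uses the standard ctypes out-parameter idiom: allocate c_int slots, pass pointers into the C call, then read .value afterwards. The idiom in isolation, with the C callee simulated:

import ctypes

out = ctypes.c_int(0)
ptr = ctypes.pointer(out)
ptr.contents.value = 3   # simulate a C function writing through an int*
print(out.value)         # 3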
django-dbbackup/django-dbbackup
dbbackup/management/commands/mediabackup.py
https://github.com/django-dbbackup/django-dbbackup/blob/77de209e2d5317e51510d0f888e085ee0c400d66/dbbackup/management/commands/mediabackup.py#L59-L68
def _explore_storage(self): """Generator of all files contained in media storage.""" path = '' dirs = [path] while dirs: path = dirs.pop() subdirs, files = self.media_storage.listdir(path) for media_filename in files: yield os.path.join(path, media_filename) dirs.extend([os.path.join(path, subdir) for subdir in subdirs])
[ "def", "_explore_storage", "(", "self", ")", ":", "path", "=", "''", "dirs", "=", "[", "path", "]", "while", "dirs", ":", "path", "=", "dirs", ".", "pop", "(", ")", "subdirs", ",", "files", "=", "self", ".", "media_storage", ".", "listdir", "(", "path", ")", "for", "media_filename", "in", "files", ":", "yield", "os", ".", "path", ".", "join", "(", "path", ",", "media_filename", ")", "dirs", ".", "extend", "(", "[", "os", ".", "path", ".", "join", "(", "path", ",", "subdir", ")", "for", "subdir", "in", "subdirs", "]", ")" ]
Generator of all files contained in media storage.
[ "Generator", "of", "all", "files", "contained", "in", "media", "storage", "." ]
python
train
40.9
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L1003-L1026
def status(self):
        """
        The current status of the event (started, finished or pending).
        """
        myNow = timezone.localtime(timezone=self.tz)
        daysDelta = dt.timedelta(days=self.num_days - 1)
        # NB: postponements can be created after the until date
        # so ignore that
        todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
        eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
        if eventStart is None:
            return "finished"
        eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
                                       event.time_to, self.tz)
        if event.time_from is None:
            eventStart += _1day
        if eventStart < myNow < eventFinish:
            # if there are two occurrences on the same day then we may miss
            # that one of them has started
            return "started"
        if (self.repeat.until and eventFinish < myNow and
                self.__afterOrPostponedTo(myNow)[0] is None):
            # only just wound up, the last occurrence was earlier today
            return "finished"
[ "def", "status", "(", "self", ")", ":", "myNow", "=", "timezone", ".", "localtime", "(", "timezone", "=", "self", ".", "tz", ")", "daysDelta", "=", "dt", ".", "timedelta", "(", "days", "=", "self", ".", "num_days", "-", "1", ")", "# NB: postponements can be created after the until date", "# so ignore that", "todayStart", "=", "getAwareDatetime", "(", "myNow", ".", "date", "(", ")", ",", "dt", ".", "time", ".", "min", ",", "self", ".", "tz", ")", "eventStart", ",", "event", "=", "self", ".", "__afterOrPostponedTo", "(", "todayStart", "-", "daysDelta", ")", "if", "eventStart", "is", "None", ":", "return", "\"finished\"", "eventFinish", "=", "getAwareDatetime", "(", "eventStart", ".", "date", "(", ")", "+", "daysDelta", ",", "event", ".", "time_to", ",", "self", ".", "tz", ")", "if", "event", ".", "time_from", "is", "None", ":", "eventStart", "+=", "_1day", "if", "eventStart", "<", "myNow", "<", "eventFinish", ":", "# if there are two occurences on the same day then we may miss", "# that one of them has started", "return", "\"started\"", "if", "(", "self", ".", "repeat", ".", "until", "and", "eventFinish", "<", "myNow", "and", "self", ".", "__afterOrPostponedTo", "(", "myNow", ")", "[", "0", "]", "is", "None", ")", ":", "# only just wound up, the last occurence was earlier today", "return", "\"finished\"" ]
The current status of the event (started, finished or pending).
[ "The", "current", "status", "of", "the", "event", "(", "started", "finished", "or", "pending", ")", "." ]
python
train
46.5
aio-libs/aioredis
aioredis/commands/string.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/string.py#L200-L212
def setbit(self, key, offset, value): """Sets or clears the bit at offset in the string value stored at key. :raises TypeError: if offset is not int :raises ValueError: if offset is less than 0 or value is not 0 or 1 """ if not isinstance(offset, int): raise TypeError("offset argument must be int") if offset < 0: raise ValueError("offset must be greater equal 0") if value not in (0, 1): raise ValueError("value argument must be either 1 or 0") return self.execute(b'SETBIT', key, offset, value)
[ "def", "setbit", "(", "self", ",", "key", ",", "offset", ",", "value", ")", ":", "if", "not", "isinstance", "(", "offset", ",", "int", ")", ":", "raise", "TypeError", "(", "\"offset argument must be int\"", ")", "if", "offset", "<", "0", ":", "raise", "ValueError", "(", "\"offset must be greater equal 0\"", ")", "if", "value", "not", "in", "(", "0", ",", "1", ")", ":", "raise", "ValueError", "(", "\"value argument must be either 1 or 0\"", ")", "return", "self", ".", "execute", "(", "b'SETBIT'", ",", "key", ",", "offset", ",", "value", ")" ]
Sets or clears the bit at offset in the string value stored at key. :raises TypeError: if offset is not int :raises ValueError: if offset is less than 0 or value is not 0 or 1
[ "Sets", "or", "clears", "the", "bit", "at", "offset", "in", "the", "string", "value", "stored", "at", "key", "." ]
python
train
45.076923
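A usage sketch against the aioredis 1.x API this file belongs to; the Redis URL is a placeholder:

import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis_pool("redis://localhost")  # placeholder URL
    await redis.setbit("flags", 7, 1)
    print(await redis.getbit("flags", 7))  # 1
    redis.close()
    await redis.wait_closed()

asyncio.get_event_loop().run_until_complete(main())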
pywavefront/PyWavefront
pywavefront/material.py
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/material.py#L217-L220
def parse_map_Ka(self):
        """Ambient map"""
        Ka = os.path.join(self.dir, " ".join(self.values[1:]))
        self.this_material.set_texture_ambient(Ka)
[ "def", "parse_map_Ka", "(", "self", ")", ":", "Kd", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dir", ",", "\" \"", ".", "join", "(", "self", ".", "values", "[", "1", ":", "]", ")", ")", "self", ".", "this_material", ".", "set_texture_ambient", "(", "Kd", ")" ]
Ambient map
[ "Ambient", "map" ]
python
train
40
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L3099-L3115
def get_parent_vault_ids(self, vault_id): """Gets the parent ``Ids`` of the given vault. arg: vault_id (osid.id.Id): a vault ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the vault raise: NotFound - ``vault_id`` is not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_parent_bin_ids if self._catalog_session is not None: return self._catalog_session.get_parent_catalog_ids(catalog_id=vault_id) return self._hierarchy_session.get_parents(id_=vault_id)
[ "def", "get_parent_vault_ids", "(", "self", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_parent_bin_ids", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "get_parent_catalog_ids", "(", "catalog_id", "=", "vault_id", ")", "return", "self", ".", "_hierarchy_session", ".", "get_parents", "(", "id_", "=", "vault_id", ")" ]
Gets the parent ``Ids`` of the given vault. arg: vault_id (osid.id.Id): a vault ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the vault raise: NotFound - ``vault_id`` is not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "parent", "Ids", "of", "the", "given", "vault", "." ]
python
train
47.588235
numenta/htmresearch
projects/nik/nik_htm.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/nik/nik_htm.py#L284-L289
def load(self, filename="temp.pkl"):
    """
    Load TM from the filename specified above
    """
    with open(filename, 'rb') as inputFile:
        self.tm = cPickle.load(inputFile)
[ "def", "load", "(", "self", ",", "filename", "=", "\"temp.pkl\"", ")", ":", "inputFile", "=", "open", "(", "filename", ",", "'rb'", ")", "self", ".", "tm", "=", "cPickle", ".", "load", "(", "inputFile", ")" ]
Load TM from the filename specified above
[ "Save", "TM", "in", "the", "filename", "specified", "above" ]
python
train
27.666667
ambitioninc/rabbitmq-admin
rabbitmq_admin/api.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L123-L132
def list_connection_channels(self, name): """ List of all channels for a given connection. :param name: The connection name :type name: str """ return self._api_get('/api/connections/{0}/channels'.format( urllib.parse.quote_plus(name) ))
[ "def", "list_connection_channels", "(", "self", ",", "name", ")", ":", "return", "self", ".", "_api_get", "(", "'/api/connections/{0}/channels'", ".", "format", "(", "urllib", ".", "parse", ".", "quote_plus", "(", "name", ")", ")", ")" ]
List of all channels for a given connection. :param name: The connection name :type name: str
[ "List", "of", "all", "channels", "for", "a", "given", "connection", "." ]
python
train
29.7
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L137-L155
def _get_object(data, position, obj_end, opts, dummy): """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size = _UNPACK_INT(data[position:position + 4])[0] end = position + obj_size - 1 if data[end:position + obj_size] != b"\x00": raise InvalidBSON("bad eoo") if end >= obj_end: raise InvalidBSON("invalid object length") if _raw_document_class(opts.document_class): return (opts.document_class(data[position:end + 1], opts), position + obj_size) obj = _elements_to_dict(data, position + 4, end, opts) position += obj_size if "$ref" in obj: return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position
[ "def", "_get_object", "(", "data", ",", "position", ",", "obj_end", ",", "opts", ",", "dummy", ")", ":", "obj_size", "=", "_UNPACK_INT", "(", "data", "[", "position", ":", "position", "+", "4", "]", ")", "[", "0", "]", "end", "=", "position", "+", "obj_size", "-", "1", "if", "data", "[", "end", ":", "position", "+", "obj_size", "]", "!=", "b\"\\x00\"", ":", "raise", "InvalidBSON", "(", "\"bad eoo\"", ")", "if", "end", ">=", "obj_end", ":", "raise", "InvalidBSON", "(", "\"invalid object length\"", ")", "if", "_raw_document_class", "(", "opts", ".", "document_class", ")", ":", "return", "(", "opts", ".", "document_class", "(", "data", "[", "position", ":", "end", "+", "1", "]", ",", "opts", ")", ",", "position", "+", "obj_size", ")", "obj", "=", "_elements_to_dict", "(", "data", ",", "position", "+", "4", ",", "end", ",", "opts", ")", "position", "+=", "obj_size", "if", "\"$ref\"", "in", "obj", ":", "return", "(", "DBRef", "(", "obj", ".", "pop", "(", "\"$ref\"", ")", ",", "obj", ".", "pop", "(", "\"$id\"", ",", "None", ")", ",", "obj", ".", "pop", "(", "\"$db\"", ",", "None", ")", ",", "obj", ")", ",", "position", ")", "return", "obj", ",", "position" ]
Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.
[ "Decode", "a", "BSON", "subdocument", "to", "opts", ".", "document_class", "or", "bson", ".", "dbref", ".", "DBRef", "." ]
python
train
40.842105
six8/corona-cipr
src/cipr/commands/core.py
https://github.com/six8/corona-cipr/blob/a2f45761080c874afa39bf95fd5c4467c8eae272/src/cipr/commands/core.py#L206-L264
def build(env, ciprcfg, console): """ Build the current project for distribution """ os.putenv('CIPR_PACKAGES', env.package_dir) os.putenv('CIPR_PROJECT', env.project_directory) build_settings = path.join(env.project_directory, 'build.settings') with open(build_settings, 'r') as f: data = f.read() m = _build_re.search(data) if m: ver = int(m.group(2)) data = data.replace(m.group(0), 'CFBundleVersion = "%d"' % (ver + 1)) with open(build_settings, 'w') as f: f.write(data) if path.exists(env.build_dir): shutil.rmtree(env.build_dir) os.makedirs(env.build_dir) if path.exists(env.dist_dir): shutil.rmtree(env.dist_dir) os.makedirs(env.dist_dir) console.normal('Building in %s' % env.build_dir) console.normal('Copy project files...') for src, dst in util.sync_dir_to(env.project_directory, env.build_dir, exclude=['.cipr', '.git', 'build', 'dist', '.*']): console.quiet(' %s -> %s' % (src, dst)) if src.endswith('.lua'): _fix_lua_module_name(src, dst) console.normal('Copy cipr packages...') for package in ciprcfg.packages.keys(): for src, dst in util.sync_lua_dir_to(path.join(env.package_dir, package), env.build_dir, exclude=['.git'], include=['*.lua']): console.quiet(' %s -> %s' % (src, dst)) if src.endswith('.lua'): _fix_lua_module_name(src, dst) src = path.join(env.code_dir, 'cipr.lua') dst = path.join(env.build_dir, 'cipr.lua') shutil.copy(src, dst) cmd = AND(clom.cd(env.build_dir), clom[CORONA_SIMULATOR_PATH](env.build_dir)) console.normal('Be sure to output your app to %s' % env.dist_dir) try: cmd.shell.execute() except KeyboardInterrupt: pass
[ "def", "build", "(", "env", ",", "ciprcfg", ",", "console", ")", ":", "os", ".", "putenv", "(", "'CIPR_PACKAGES'", ",", "env", ".", "package_dir", ")", "os", ".", "putenv", "(", "'CIPR_PROJECT'", ",", "env", ".", "project_directory", ")", "build_settings", "=", "path", ".", "join", "(", "env", ".", "project_directory", ",", "'build.settings'", ")", "with", "open", "(", "build_settings", ",", "'r'", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "m", "=", "_build_re", ".", "search", "(", "data", ")", "if", "m", ":", "ver", "=", "int", "(", "m", ".", "group", "(", "2", ")", ")", "data", "=", "data", ".", "replace", "(", "m", ".", "group", "(", "0", ")", ",", "'CFBundleVersion = \"%d\"'", "%", "(", "ver", "+", "1", ")", ")", "with", "open", "(", "build_settings", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "if", "path", ".", "exists", "(", "env", ".", "build_dir", ")", ":", "shutil", ".", "rmtree", "(", "env", ".", "build_dir", ")", "os", ".", "makedirs", "(", "env", ".", "build_dir", ")", "if", "path", ".", "exists", "(", "env", ".", "dist_dir", ")", ":", "shutil", ".", "rmtree", "(", "env", ".", "dist_dir", ")", "os", ".", "makedirs", "(", "env", ".", "dist_dir", ")", "console", ".", "normal", "(", "'Building in %s'", "%", "env", ".", "build_dir", ")", "console", ".", "normal", "(", "'Copy project files...'", ")", "for", "src", ",", "dst", "in", "util", ".", "sync_dir_to", "(", "env", ".", "project_directory", ",", "env", ".", "build_dir", ",", "exclude", "=", "[", "'.cipr'", ",", "'.git'", ",", "'build'", ",", "'dist'", ",", "'.*'", "]", ")", ":", "console", ".", "quiet", "(", "' %s -> %s'", "%", "(", "src", ",", "dst", ")", ")", "if", "src", ".", "endswith", "(", "'.lua'", ")", ":", "_fix_lua_module_name", "(", "src", ",", "dst", ")", "console", ".", "normal", "(", "'Copy cipr packages...'", ")", "for", "package", "in", "ciprcfg", ".", "packages", ".", "keys", "(", ")", ":", "for", "src", ",", "dst", "in", "util", ".", "sync_lua_dir_to", "(", "path", ".", "join", "(", "env", ".", "package_dir", ",", "package", ")", ",", "env", ".", "build_dir", ",", "exclude", "=", "[", "'.git'", "]", ",", "include", "=", "[", "'*.lua'", "]", ")", ":", "console", ".", "quiet", "(", "' %s -> %s'", "%", "(", "src", ",", "dst", ")", ")", "if", "src", ".", "endswith", "(", "'.lua'", ")", ":", "_fix_lua_module_name", "(", "src", ",", "dst", ")", "src", "=", "path", ".", "join", "(", "env", ".", "code_dir", ",", "'cipr.lua'", ")", "dst", "=", "path", ".", "join", "(", "env", ".", "build_dir", ",", "'cipr.lua'", ")", "shutil", ".", "copy", "(", "src", ",", "dst", ")", "cmd", "=", "AND", "(", "clom", ".", "cd", "(", "env", ".", "build_dir", ")", ",", "clom", "[", "CORONA_SIMULATOR_PATH", "]", "(", "env", ".", "build_dir", ")", ")", "console", ".", "normal", "(", "'Be sure to output your app to %s'", "%", "env", ".", "dist_dir", ")", "try", ":", "cmd", ".", "shell", ".", "execute", "(", ")", "except", "KeyboardInterrupt", ":", "pass" ]
Build the current project for distribution
[ "Build", "the", "current", "project", "for", "distribution" ]
python
train
30.084746
MolSSI-BSE/basis_set_exchange
basis_set_exchange/lut.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/lut.py#L198-L224
def amchar_to_int(amchar, hij=False):
    '''Convert an angular momentum character to an integer

    The return value is a list of integers (to handle sp, spd, ... orbitals)

    For example, converts 'p' to [1] and 'sp' to [0,1]

    If hij is True, the ordering spdfghijkl is used. Otherwise, the
    ordering will be spdfghikl (skipping j)
    '''

    if hij:
        amchar_map = _amchar_map_hij
    else:
        amchar_map = _amchar_map_hik

    amchar_lower = amchar.lower()

    amint = []

    for c in amchar_lower:
        if c not in amchar_map:
            raise KeyError('Angular momentum character {} is not valid'.format(c))

        amint.append(amchar_map.index(c))

    return amint
[ "def", "amchar_to_int", "(", "amchar", ",", "hij", "=", "False", ")", ":", "if", "hij", ":", "amchar_map", "=", "_amchar_map_hij", "else", ":", "amchar_map", "=", "_amchar_map_hik", "amchar_lower", "=", "amchar", ".", "lower", "(", ")", "amint", "=", "[", "]", "for", "c", "in", "amchar_lower", ":", "if", "c", "not", "in", "amchar_map", ":", "raise", "KeyError", "(", "'Angular momentum character {} is not valid'", ".", "format", "(", "c", ")", ")", "amint", ".", "append", "(", "amchar_map", ".", "index", "(", "c", ")", ")", "return", "amint" ]
Convert an angular momentum character to an integer

    The return value is a list of integers (to handle sp, spd, ... orbitals)

    For example, converts 'p' to [1] and 'sp' to [0,1]

    If hij is True, the ordering spdfghijkl is used. Otherwise, the
    ordering will be spdfghikl (skipping j)
[ "Convert", "an", "angular", "momentum", "integer", "to", "a", "character" ]
python
train
25.037037
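Quick usage check of the cases documented in the docstring above (the import path follows the file path shown for this record):

from basis_set_exchange import lut

print(lut.amchar_to_int('p'))   # [1]
print(lut.amchar_to_int('sp'))  # [0, 1]
print(lut.amchar_to_int('SP'))  # [0, 1] -- input is lower-cased first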
eaton-lab/toytree
toytree/Multitree.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Multitree.py#L201-L280
def draw_tree_grid(self,
        nrows=None,
        ncols=None,
        start=0,
        fixed_order=False,
        shared_axis=False,
        **kwargs):
        """
        Draw a slice of trees from .treelist into a nrows x ncols grid of
        non-overlapping axes.

        Parameters:
        -----------
        nrows (int):
            Number of grid rows. Default=automatically set.

        ncols (int):
            Number of grid columns. Default=automatically set.

        start (int):
            Starting index of tree slice from .treelist.

        kwargs (dict):
            Toytree .draw() arguments as a dictionary.
        """
        # return nothing if tree is empty
        if not self.treelist:
            print("Treelist is empty")
            return None, None

        # make a copy of the treelist so we don't modify the original
        if not fixed_order:
            treelist = self.copy().treelist
        else:
            if fixed_order is True:
                fixed_order = self.treelist[0].get_tip_labels()
            treelist = [
                ToyTree(i, fixed_order=fixed_order)
                for i in self.copy().treelist
            ]

        # apply kwargs styles to the individual tree styles
        for tree in treelist:
            tree.style.update(kwargs)

        # get reasonable values for nrows,ncols given treelist length
        if not (ncols or nrows):
            ncols = 5
            nrows = 1
        elif not (ncols and nrows):
            if ncols:
                if ncols == 1:
                    if self.ntrees <= 5:
                        nrows = self.ntrees
                    else:
                        nrows = 2
                else:
                    if self.ntrees <= 10:
                        nrows = 2
                    else:
                        nrows = 3
            if nrows:
                if nrows == 1:
                    if self.ntrees <= 5:
                        ncols = self.ntrees
                    else:
                        ncols = 5
                else:
                    if self.ntrees <= 10:
                        ncols = 5
                    else:
                        ncols = 3
        else:
            pass

        # Return TreeGrid object for debugging
        draw = TreeGrid(treelist)
        if kwargs.get("debug"):
            return draw

        # Call update to draw plot. Kwargs still here for width, height, axes
        canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
        return canvas, axes
[ "def", "draw_tree_grid", "(", "self", ",", "nrows", "=", "None", ",", "ncols", "=", "None", ",", "start", "=", "0", ",", "fixed_order", "=", "False", ",", "shared_axis", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# return nothing if tree is empty", "if", "not", "self", ".", "treelist", ":", "print", "(", "\"Treelist is empty\"", ")", "return", "None", ",", "None", "# make a copy of the treelist so we don't modify the original", "if", "not", "fixed_order", ":", "treelist", "=", "self", ".", "copy", "(", ")", ".", "treelist", "else", ":", "if", "fixed_order", "is", "True", ":", "fixed_order", "=", "self", ".", "treelist", "[", "0", "]", ".", "get_tip_labels", "(", ")", "treelist", "=", "[", "ToyTree", "(", "i", ",", "fixed_order", "=", "fixed_order", ")", "for", "i", "in", "self", ".", "copy", "(", ")", ".", "treelist", "]", "# apply kwargs styles to the individual tree styles", "for", "tree", "in", "treelist", ":", "tree", ".", "style", ".", "update", "(", "kwargs", ")", "# get reasonable values for x,y given treelist length", "if", "not", "(", "ncols", "or", "nrows", ")", ":", "ncols", "=", "5", "nrows", "=", "1", "elif", "not", "(", "ncols", "and", "nrows", ")", ":", "if", "ncols", ":", "if", "ncols", "==", "1", ":", "if", "self", ".", "ntrees", "<=", "5", ":", "nrows", "=", "self", ".", "ntrees", "else", ":", "nrows", "=", "2", "else", ":", "if", "self", ".", "ntrees", "<=", "10", ":", "nrows", "=", "2", "else", ":", "nrows", "=", "3", "if", "nrows", ":", "if", "nrows", "==", "1", ":", "if", "self", ".", "ntrees", "<=", "5", ":", "ncols", "=", "self", ".", "ntrees", "else", ":", "ncols", "=", "5", "else", ":", "if", "self", ".", "ntrees", "<=", "10", ":", "ncols", "=", "5", "else", ":", "ncols", "=", "3", "else", ":", "pass", "# Return TereGrid object for debugging", "draw", "=", "TreeGrid", "(", "treelist", ")", "if", "kwargs", ".", "get", "(", "\"debug\"", ")", ":", "return", "draw", "# Call update to draw plot. Kwargs still here for width, height, axes", "canvas", ",", "axes", "=", "draw", ".", "update", "(", "nrows", ",", "ncols", ",", "start", ",", "shared_axis", ",", "*", "*", "kwargs", ")", "return", "canvas", ",", "axes" ]
Draw a slice of trees from .treelist into a nrows x ncols grid of
        non-overlapping axes.

        Parameters:
        -----------
        nrows (int):
            Number of grid rows. Default=automatically set.

        ncols (int):
            Number of grid columns. Default=automatically set.

        start (int):
            Starting index of tree slice from .treelist.

        kwargs (dict):
            Toytree .draw() arguments as a dictionary.
[ "Draw", "a", "slice", "of", "x", "*", "y", "trees", "into", "a", "x", "y", "grid", "non", "-", "overlapping", "." ]
python
train
30.9375
hozn/coilmq
coilmq/start.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/start.py#L207-L216
def main(config, host, port, logfile, debug, daemon, uid, gid, pidfile, umask, rundir):
    """
    Main entry point for running a socket server from the commandline.

    This method will read in options from the commandline and call the L{config.init_config} method
    to get everything setup.  Then, depending on whether daemon mode was specified or not,
    the process may be forked (or not) and the server will be started.
    """
    _main(**locals())
[ "def", "main", "(", "config", ",", "host", ",", "port", ",", "logfile", ",", "debug", ",", "daemon", ",", "uid", ",", "gid", ",", "pidfile", ",", "umask", ",", "rundir", ")", ":", "_main", "(", "*", "*", "locals", "(", ")", ")" ]
Main entry point for running a socket server from the commandline.

    This method will read in options from the commandline and call the L{config.init_config} method
    to get everything setup.  Then, depending on whether daemon mode was specified or not,
    the process may be forked (or not) and the server will be started.
[ "Main", "entry", "point", "for", "running", "a", "socket", "server", "from", "the", "commandline", "." ]
python
train
45.2
saltstack/salt
salt/states/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/virt.py#L187-L210
def stopped(name, connection=None, username=None, password=None): ''' Stops a VM by shutting it down nicely. .. versionadded:: 2016.3.0 :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. code-block:: yaml domain_name: virt.stopped ''' return _virt_call(name, 'shutdown', 'stopped', "Machine has been shut down", connection=connection, username=username, password=password)
[ "def", "stopped", "(", "name", ",", "connection", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "return", "_virt_call", "(", "name", ",", "'shutdown'", ",", "'stopped'", ",", "\"Machine has been shut down\"", ",", "connection", "=", "connection", ",", "username", "=", "username", ",", "password", "=", "password", ")" ]
Stops a VM by shutting it down nicely. .. versionadded:: 2016.3.0 :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 .. code-block:: yaml domain_name: virt.stopped
[ "Stops", "a", "VM", "by", "shutting", "it", "down", "nicely", "." ]
python
train
28.291667
kensho-technologies/graphql-compiler
graphql_compiler/compiler/emit_sql.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/emit_sql.py#L75-L90
def _create_table_and_update_context(node, context): """Create an aliased table for a SqlNode. Updates the relevant Selectable global context. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Table, the newly aliased SQLAlchemy table. """ schema_type_name = sql_context_helpers.get_schema_type_name(node, context) table = context.compiler_metadata.get_table(schema_type_name).alias() context.query_path_to_selectable[node.query_path] = table return table
[ "def", "_create_table_and_update_context", "(", "node", ",", "context", ")", ":", "schema_type_name", "=", "sql_context_helpers", ".", "get_schema_type_name", "(", "node", ",", "context", ")", "table", "=", "context", ".", "compiler_metadata", ".", "get_table", "(", "schema_type_name", ")", ".", "alias", "(", ")", "context", ".", "query_path_to_selectable", "[", "node", ".", "query_path", "]", "=", "table", "return", "table" ]
Create an aliased table for a SqlNode. Updates the relevant Selectable global context. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Table, the newly aliased SQLAlchemy table.
[ "Create", "an", "aliased", "table", "for", "a", "SqlNode", "." ]
python
train
35.5625
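The interesting call is .alias(), which wraps a SQLAlchemy Table in an anonymously named alias so the same table can appear several times in one query. A minimal sketch with a hypothetical table:

from sqlalchemy import Column, Integer, MetaData, Table

metadata = MetaData()
animal = Table("Animal", metadata, Column("id", Integer, primary_key=True))

first = animal.alias()   # independent alias of the same table
second = animal.alias()  # a second one, e.g. for self-joins
print(first.select())    # roughly: SELECT animal_1.id FROM "Animal" AS animal_1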
PaloAltoNetworks/pancloud
pancloud/directorysync.py
https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/directorysync.py#L61-L86
def attributes(self, **kwargs): # pragma: no cover """Retrieve the attribute configuration object. Retrieves a mapping that identifies the custom directory attributes configured for the Directory Sync Service instance, and the mapping of the custom attributes to standard directory attributes. Args: **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``directory_attributes.py`` example. """ path = "/directory-sync-service/v1/attributes" r = self._httpclient.request( method="GET", path=path, url=self.url, **kwargs ) return r
[ "def", "attributes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "path", "=", "\"/directory-sync-service/v1/attributes\"", "r", "=", "self", ".", "_httpclient", ".", "request", "(", "method", "=", "\"GET\"", ",", "path", "=", "path", ",", "url", "=", "self", ".", "url", ",", "*", "*", "kwargs", ")", "return", "r" ]
Retrieve the attribute configuration object. Retrieves a mapping that identifies the custom directory attributes configured for the Directory Sync Service instance, and the mapping of the custom attributes to standard directory attributes. Args: **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``directory_attributes.py`` example.
[ "Retrieve", "the", "attribute", "configuration", "object", "." ]
python
train
30.730769
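Under the hood this is a plain GET against the path shown above. A hedged equivalent using requests directly (the base URL and token are hypothetical placeholders, not values from this package):

import requests

url = "https://api.us.paloaltonetworks.com"  # hypothetical API base
headers = {"Authorization": "Bearer <access_token>"}  # hypothetical credential
r = requests.get(url + "/directory-sync-service/v1/attributes",
                 headers=headers, timeout=10)
print(r.status_code, r.json())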
rocky/python-uncompyle6
uncompyle6/semantics/fragments.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/semantics/fragments.py#L1858-L1884
def code_deparse_around_offset(name, offset, co, out=StringIO(), version=None, is_pypy=None, debug_opts=DEFAULT_DEBUG_OPTS): """ Like deparse_code(), but given a function/module name and offset, finds the node closest to offset. If offset is not an instruction boundary, we raise an IndexError. """ assert iscode(co) if version is None: version = sysinfo2float() if is_pypy is None: is_pypy = IS_PYPY deparsed = code_deparse(co, out, version, is_pypy, debug_opts) if (name, offset) in deparsed.offsets.keys(): # This is the easy case return deparsed valid_offsets = [t for t in deparsed.offsets if isinstance(t[1], int)] offset_list = sorted([t[1] for t in valid_offsets if t[0] == name]) # FIXME: should check for branching? found_offset = find_gt(offset_list, offset) deparsed.offsets[name, offset] = deparsed.offsets[name, found_offset] return deparsed
[ "def", "code_deparse_around_offset", "(", "name", ",", "offset", ",", "co", ",", "out", "=", "StringIO", "(", ")", ",", "version", "=", "None", ",", "is_pypy", "=", "None", ",", "debug_opts", "=", "DEFAULT_DEBUG_OPTS", ")", ":", "assert", "iscode", "(", "co", ")", "if", "version", "is", "None", ":", "version", "=", "sysinfo2float", "(", ")", "if", "is_pypy", "is", "None", ":", "is_pypy", "=", "IS_PYPY", "deparsed", "=", "code_deparse", "(", "co", ",", "out", ",", "version", ",", "is_pypy", ",", "debug_opts", ")", "if", "(", "name", ",", "offset", ")", "in", "deparsed", ".", "offsets", ".", "keys", "(", ")", ":", "# This is the easy case", "return", "deparsed", "valid_offsets", "=", "[", "t", "for", "t", "in", "deparsed", ".", "offsets", "if", "isinstance", "(", "t", "[", "1", "]", ",", "int", ")", "]", "offset_list", "=", "sorted", "(", "[", "t", "[", "1", "]", "for", "t", "in", "valid_offsets", "if", "t", "[", "0", "]", "==", "name", "]", ")", "# FIXME: should check for branching?", "found_offset", "=", "find_gt", "(", "offset_list", ",", "offset", ")", "deparsed", ".", "offsets", "[", "name", ",", "offset", "]", "=", "deparsed", ".", "offsets", "[", "name", ",", "found_offset", "]", "return", "deparsed" ]
Like deparse_code(), but given a function/module name and offset, finds the node closest to offset. If offset is not an instruction boundary, we raise an IndexError.
[ "Like", "deparse_code", "()", "but", "given", "a", "function", "/", "module", "name", "and", "offset", "finds", "the", "node", "closest", "to", "offset", ".", "If", "offset", "is", "not", "an", "instruction", "boundary", "we", "raise", "an", "IndexError", "." ]
python
train
36.740741
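find_gt is not defined in this snippet; a common implementation, and presumably what this code relies on, is the bisect recipe from the Python standard library docs, which locates the next instruction offset strictly greater than the one requested:

from bisect import bisect_right

def find_gt(a, x):
    # leftmost value in sorted list a that is strictly greater than x
    i = bisect_right(a, x)
    if i != len(a):
        return a[i]
    raise ValueError

find_gt([0, 3, 6, 9], 4)  # -> 6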
wonambi-python/wonambi
wonambi/trans/extern/dpss.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/trans/extern/dpss.py#L8-L133
def dpss_windows(N, NW, Kmax, interp_from=None, interp_kind='linear'): """ Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. Parameters ---------- N : int sequence length NW : float, unitless standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt but with dt taken as 1 Kmax : int number of DPSS windows to return is Kmax (orders 0 through Kmax-1) interp_from : int (optional) The dpss can be calculated using interpolation from a set of dpss with the same NW and Kmax, but shorter N. This is the length of this shorter set of dpss windows. interp_kind : str (optional) This input variable is passed to scipy.interpolate.interp1d and specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the order of the spline interpolator to use. Returns ------- v, e : tuple, v is an array of DPSS windows shaped (Kmax, N) e are the eigenvalues Notes ----- Tridiagonal form of DPSS calculation from: Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and uncertainty V: The discrete case. Bell System Technical Journal, Volume 57 (1978), 1371-1430 """ Kmax = int(Kmax) W = float(NW) / N nidx = np.arange(N, dtype='d') # In this case, we create the dpss windows of the smaller size # (interp_from) and then interpolate to the larger size (N) if interp_from is not None: if interp_from > N: e_s = 'In dpss_windows, interp_from is: %s ' % interp_from e_s += 'and N is: %s. ' % N e_s += 'Please enter interp_from smaller than N.' raise ValueError(e_s) dpss = [] d, e = dpss_windows(interp_from, NW, Kmax) for this_d in d: x = np.arange(this_d.shape[-1]) I = interpolate.interp1d(x, this_d, kind=interp_kind) d_temp = I(np.linspace(0, this_d.shape[-1] - 1, N, endpoint=False)) # Rescale: d_temp = d_temp / np.sqrt(np.sum(d_temp ** 2)) dpss.append(d_temp) dpss = np.array(dpss) else: # here we want to set up an optimization problem to find a sequence # whose energy is maximally concentrated within band [-W,W]. # Thus, the measure lambda(T,W) is the ratio between the energy within # that band, and the total energy. This leads to the eigen-system # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest # eigenvalue is the sequence with maximally concentrated energy. The # collection of eigenvectors of this system are called Slepian # sequences, or discrete prolate spheroidal sequences (DPSS). Only the # first K, K = 2NW/dt orders of DPSS will exhibit good spectral # concentration # [see http://en.wikipedia.org/wiki/Spectral_concentration_problem] # Here I set up an alternative symmetric tri-diagonal eigenvalue # problem such that # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1) # the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1] # and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1] # [see Percival and Walden, 1993] diagonal = ((N - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W) off_diag = np.zeros_like(nidx) off_diag[:-1] = nidx[1:] * (N - nidx[1:]) / 2. 
# put the diagonals in LAPACK "packed" storage ab = np.zeros((2, N), 'd') ab[1] = diagonal ab[0, 1:] = off_diag[:-1] # only calculate the highest Kmax eigenvalues w = linalg.eigvals_banded(ab, select='i', select_range=(N - Kmax, N - 1)) w = w[::-1] # find the corresponding eigenvectors via inverse iteration t = np.linspace(0, np.pi, N) dpss = np.zeros((Kmax, N), 'd') for k in range(Kmax): dpss[k] = tridi_inverse_iteration( diagonal, off_diag, w[k], x0=np.sin((k + 1) * t) ) # By convention (Percival and Walden, 1993 pg 379) # * symmetric tapers (k=0,2,4,...) should have a positive average. # * antisymmetric tapers should begin with a positive lobe fix_symmetric = (dpss[0::2].sum(axis=1) < 0) for i, f in enumerate(fix_symmetric): if f: dpss[2 * i] *= -1 # rather than test the sign of one point, test the sign of the # linear slope up to the first (largest) peak pk = np.argmax(np.abs(dpss[1::2, :N//2]), axis=1) for i, p in enumerate(pk): if np.sum(dpss[2 * i + 1, :p]) < 0: dpss[2 * i + 1] *= -1 # Now find the eigenvalues of the original spectral concentration problem # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390 dpss_rxx = autocorr(dpss) * N r = 4 * W * np.sinc(2 * W * nidx) r[0] = 2 * W eigvals = np.dot(dpss_rxx, r) return dpss, eigvals
[ "def", "dpss_windows", "(", "N", ",", "NW", ",", "Kmax", ",", "interp_from", "=", "None", ",", "interp_kind", "=", "'linear'", ")", ":", "Kmax", "=", "int", "(", "Kmax", ")", "W", "=", "float", "(", "NW", ")", "/", "N", "nidx", "=", "np", ".", "arange", "(", "N", ",", "dtype", "=", "'d'", ")", "# In this case, we create the dpss windows of the smaller size", "# (interp_from) and then interpolate to the larger size (N)", "if", "interp_from", "is", "not", "None", ":", "if", "interp_from", ">", "N", ":", "e_s", "=", "'In dpss_windows, interp_from is: %s '", "%", "interp_from", "e_s", "+=", "'and N is: %s. '", "%", "N", "e_s", "+=", "'Please enter interp_from smaller than N.'", "raise", "ValueError", "(", "e_s", ")", "dpss", "=", "[", "]", "d", ",", "e", "=", "dpss_windows", "(", "interp_from", ",", "NW", ",", "Kmax", ")", "for", "this_d", "in", "d", ":", "x", "=", "np", ".", "arange", "(", "this_d", ".", "shape", "[", "-", "1", "]", ")", "I", "=", "interpolate", ".", "interp1d", "(", "x", ",", "this_d", ",", "kind", "=", "interp_kind", ")", "d_temp", "=", "I", "(", "np", ".", "linspace", "(", "0", ",", "this_d", ".", "shape", "[", "-", "1", "]", "-", "1", ",", "N", ",", "endpoint", "=", "False", ")", ")", "# Rescale:", "d_temp", "=", "d_temp", "/", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "d_temp", "**", "2", ")", ")", "dpss", ".", "append", "(", "d_temp", ")", "dpss", "=", "np", ".", "array", "(", "dpss", ")", "else", ":", "# here we want to set up an optimization problem to find a sequence", "# whose energy is maximally concentrated within band [-W,W].", "# Thus, the measure lambda(T,W) is the ratio between the energy within", "# that band, and the total energy. This leads to the eigen-system", "# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest", "# eigenvalue is the sequence with maximally concentrated energy. The", "# collection of eigenvectors of this system are called Slepian", "# sequences, or discrete prolate spheroidal sequences (DPSS). 
Only the", "# first K, K = 2NW/dt orders of DPSS will exhibit good spectral", "# concentration", "# [see http://en.wikipedia.org/wiki/Spectral_concentration_problem]", "# Here I set up an alternative symmetric tri-diagonal eigenvalue", "# problem such that", "# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)", "# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]", "# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]", "# [see Percival and Walden, 1993]", "diagonal", "=", "(", "(", "N", "-", "1", "-", "2", "*", "nidx", ")", "/", "2.", ")", "**", "2", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "*", "W", ")", "off_diag", "=", "np", ".", "zeros_like", "(", "nidx", ")", "off_diag", "[", ":", "-", "1", "]", "=", "nidx", "[", "1", ":", "]", "*", "(", "N", "-", "nidx", "[", "1", ":", "]", ")", "/", "2.", "# put the diagonals in LAPACK \"packed\" storage", "ab", "=", "np", ".", "zeros", "(", "(", "2", ",", "N", ")", ",", "'d'", ")", "ab", "[", "1", "]", "=", "diagonal", "ab", "[", "0", ",", "1", ":", "]", "=", "off_diag", "[", ":", "-", "1", "]", "# only calculate the highest Kmax eigenvalues", "w", "=", "linalg", ".", "eigvals_banded", "(", "ab", ",", "select", "=", "'i'", ",", "select_range", "=", "(", "N", "-", "Kmax", ",", "N", "-", "1", ")", ")", "w", "=", "w", "[", ":", ":", "-", "1", "]", "# find the corresponding eigenvectors via inverse iteration", "t", "=", "np", ".", "linspace", "(", "0", ",", "np", ".", "pi", ",", "N", ")", "dpss", "=", "np", ".", "zeros", "(", "(", "Kmax", ",", "N", ")", ",", "'d'", ")", "for", "k", "in", "range", "(", "Kmax", ")", ":", "dpss", "[", "k", "]", "=", "tridi_inverse_iteration", "(", "diagonal", ",", "off_diag", ",", "w", "[", "k", "]", ",", "x0", "=", "np", ".", "sin", "(", "(", "k", "+", "1", ")", "*", "t", ")", ")", "# By convention (Percival and Walden, 1993 pg 379)", "# * symmetric tapers (k=0,2,4,...) should have a positive average.", "# * antisymmetric tapers should begin with a positive lobe", "fix_symmetric", "=", "(", "dpss", "[", "0", ":", ":", "2", "]", ".", "sum", "(", "axis", "=", "1", ")", "<", "0", ")", "for", "i", ",", "f", "in", "enumerate", "(", "fix_symmetric", ")", ":", "if", "f", ":", "dpss", "[", "2", "*", "i", "]", "*=", "-", "1", "# rather than test the sign of one point, test the sign of the", "# linear slope up to the first (largest) peak", "pk", "=", "np", ".", "argmax", "(", "np", ".", "abs", "(", "dpss", "[", "1", ":", ":", "2", ",", ":", "N", "//", "2", "]", ")", ",", "axis", "=", "1", ")", "for", "i", ",", "p", "in", "enumerate", "(", "pk", ")", ":", "if", "np", ".", "sum", "(", "dpss", "[", "2", "*", "i", "+", "1", ",", ":", "p", "]", ")", "<", "0", ":", "dpss", "[", "2", "*", "i", "+", "1", "]", "*=", "-", "1", "# Now find the eigenvalues of the original spectral concentration problem", "# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390", "dpss_rxx", "=", "autocorr", "(", "dpss", ")", "*", "N", "r", "=", "4", "*", "W", "*", "np", ".", "sinc", "(", "2", "*", "W", "*", "nidx", ")", "r", "[", "0", "]", "=", "2", "*", "W", "eigvals", "=", "np", ".", "dot", "(", "dpss_rxx", ",", "r", ")", "return", "dpss", ",", "eigvals" ]
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. Parameters ---------- N : int sequence length NW : float, unitless standardized half bandwidth corresponding to 2NW = BW/f0 = BW*N*dt but with dt taken as 1 Kmax : int number of DPSS windows to return is Kmax (orders 0 through Kmax-1) interp_from : int (optional) The dpss can be calculated using interpolation from a set of dpss with the same NW and Kmax, but shorter N. This is the length of this shorter set of dpss windows. interp_kind : str (optional) This input variable is passed to scipy.interpolate.interp1d and specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the order of the spline interpolator to use. Returns ------- v, e : tuple, v is an array of DPSS windows shaped (Kmax, N) e are the eigenvalues Notes ----- Tridiagonal form of DPSS calculation from: Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and uncertainty V: The discrete case. Bell System Technical Journal, Volume 57 (1978), 1371-1430
[ "Returns", "the", "Discrete", "Prolate", "Spheroidal", "Sequences", "of", "orders", "[", "0", "Kmax", "-", "1", "]", "for", "a", "given", "frequency", "-", "spacing", "multiple", "NW", "and", "sequence", "length", "N", "." ]
python
train
40.285714
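A minimal usage sketch: for NW = 3, roughly the first 2*NW - 1 = 5 tapers stay well concentrated, so their eigenvalues should sit near 1. (Recent SciPy ships a comparable routine as scipy.signal.windows.dpss.)

tapers, eigvals = dpss_windows(512, 3, 5)
print(tapers.shape)  # (5, 512)
print(eigvals)       # values close to 1.0, decreasing with taper order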
numenta/nupic
src/nupic/regions/record_sensor.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/record_sensor.py#L507-L533
def _convertNonNumericData(self, spatialOutput, temporalOutput, output): """ Converts all of the non-numeric fields from spatialOutput and temporalOutput into their scalar equivalents and records them in the output dictionary. :param spatialOutput: The results of topDownCompute() for the spatial input. :param temporalOutput: The results of topDownCompute() for the temporal input. :param output: The main dictionary of outputs passed to compute(). It is expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that are mapped to numpy arrays. """ encoders = self.encoder.getEncoderList() types = self.encoder.getDecoderOutputFieldTypes() for i, (encoder, type) in enumerate(zip(encoders, types)): spatialData = spatialOutput[i] temporalData = temporalOutput[i] if type != FieldMetaType.integer and type != FieldMetaType.float: # TODO: Make sure that this doesn't modify any state spatialData = encoder.getScalars(spatialData)[0] temporalData = encoder.getScalars(temporalData)[0] assert isinstance(spatialData, (float, int)) assert isinstance(temporalData, (float, int)) output['spatialTopDownOut'][i] = spatialData output['temporalTopDownOut'][i] = temporalData
[ "def", "_convertNonNumericData", "(", "self", ",", "spatialOutput", ",", "temporalOutput", ",", "output", ")", ":", "encoders", "=", "self", ".", "encoder", ".", "getEncoderList", "(", ")", "types", "=", "self", ".", "encoder", ".", "getDecoderOutputFieldTypes", "(", ")", "for", "i", ",", "(", "encoder", ",", "type", ")", "in", "enumerate", "(", "zip", "(", "encoders", ",", "types", ")", ")", ":", "spatialData", "=", "spatialOutput", "[", "i", "]", "temporalData", "=", "temporalOutput", "[", "i", "]", "if", "type", "!=", "FieldMetaType", ".", "integer", "and", "type", "!=", "FieldMetaType", ".", "float", ":", "# TODO: Make sure that this doesn't modify any state", "spatialData", "=", "encoder", ".", "getScalars", "(", "spatialData", ")", "[", "0", "]", "temporalData", "=", "encoder", ".", "getScalars", "(", "temporalData", ")", "[", "0", "]", "assert", "isinstance", "(", "spatialData", ",", "(", "float", ",", "int", ")", ")", "assert", "isinstance", "(", "temporalData", ",", "(", "float", ",", "int", ")", ")", "output", "[", "'spatialTopDownOut'", "]", "[", "i", "]", "=", "spatialData", "output", "[", "'temporalTopDownOut'", "]", "[", "i", "]", "=", "temporalData" ]
Converts all of the non-numeric fields from spatialOutput and temporalOutput into their scalar equivalents and records them in the output dictionary. :param spatialOutput: The results of topDownCompute() for the spatial input. :param temporalOutput: The results of topDownCompute() for the temporal input. :param output: The main dictionary of outputs passed to compute(). It is expected to have keys 'spatialTopDownOut' and 'temporalTopDownOut' that are mapped to numpy arrays.
[ "Converts", "all", "of", "the", "non", "-", "numeric", "fields", "from", "spatialOutput", "and", "temporalOutput", "into", "their", "scalar", "equivalents", "and", "records", "them", "in", "the", "output", "dictionary", "." ]
python
valid
47.259259
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L154-L160
def getOntology(self, id_): """ Returns the ontology with the specified ID. """ if id_ not in self._ontologyIdMap: raise exceptions.OntologyNotFoundException(id_) return self._ontologyIdMap[id_]
[ "def", "getOntology", "(", "self", ",", "id_", ")", ":", "if", "id_", "not", "in", "self", ".", "_ontologyIdMap", ":", "raise", "exceptions", ".", "OntologyNotFoundException", "(", "id_", ")", "return", "self", ".", "_ontologyIdMap", "[", "id_", "]" ]
Returns the ontology with the specified ID.
[ "Returns", "the", "ontology", "with", "the", "specified", "ID", "." ]
python
train
34.285714
aaugustin/websockets
src/websockets/headers.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/headers.py#L354-L366
def parse_subprotocol_item( header: str, pos: int, header_name: str ) -> Tuple[Subprotocol, int]: """ Parse a subprotocol from ``header`` at the given position. Return the subprotocol value and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. """ item, pos = parse_token(header, pos, header_name) return cast(Subprotocol, item), pos
[ "def", "parse_subprotocol_item", "(", "header", ":", "str", ",", "pos", ":", "int", ",", "header_name", ":", "str", ")", "->", "Tuple", "[", "Subprotocol", ",", "int", "]", ":", "item", ",", "pos", "=", "parse_token", "(", "header", ",", "pos", ",", "header_name", ")", "return", "cast", "(", "Subprotocol", ",", "item", ")", ",", "pos" ]
Parse a subprotocol from ``header`` at the given position. Return the subprotocol value and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
[ "Parse", "a", "subprotocol", "from", "header", "at", "the", "given", "position", "." ]
python
train
30.692308
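A rough usage sketch, assuming parse_token consumes one HTTP token and returns the position just past it:

header = "chat, superchat"
item, pos = parse_subprotocol_item(header, 0, "Sec-WebSocket-Protocol")
print(item, pos)  # roughly: chat 4 -- pos now points at the comma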
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2586-L2590
def organization_requests(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/requests#list-requests" api_path = "/api/v2/organizations/{id}/requests.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "organization_requests", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/organizations/{id}/requests.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/requests#list-requests
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "requests#list", "-", "requests" ]
python
train
54.6
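A hedged usage sketch (the credentials are placeholders, and the Zendesk constructor arguments may differ between zdesk versions):

from zdesk import Zendesk

zd = Zendesk("https://example.zendesk.com",
             "agent@example.com", "api_token_here", True)
result = zd.organization_requests(id=12345)  # hypothetical organization id
print(result)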
getpelican/pelican-plugins
libravatar/libravatar.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/libravatar/libravatar.py#L29-L61
def add_libravatar (generator, metadata): """Article generator connector for the Libravatar plugin""" missing = generator.settings.get ('LIBRAVATAR_MISSING') size = generator.settings.get ('LIBRAVATAR_SIZE') ## Check the presence of the Email header if 'email' not in metadata.keys (): try: metadata ['email'] = generator.settings.get ('AUTHOR_EMAIL') except: pass ## Add the Libravatar URL if metadata ['email']: ## Compose URL using the MD5 hash ## (the ascii encoding is necessary for Python3) email = metadata ['email'].lower ().encode ('ascii') md5 = hashlib.md5 (email).hexdigest () url = 'http://cdn.libravatar.org/avatar/' + md5 ## Add the optional "missing picture" option if missing or size: url = url + '?' if missing: url = url + 'd=' + missing if size: url = url + '&' if size: url = url + 's=' + str (size) ## Add URL to the article's metadata metadata ['author_libravatar'] = url
[ "def", "add_libravatar", "(", "generator", ",", "metadata", ")", ":", "missing", "=", "generator", ".", "settings", ".", "get", "(", "'LIBRAVATAR_MISSING'", ")", "size", "=", "generator", ".", "settings", ".", "get", "(", "'LIBRAVATAR_SIZE'", ")", "## Check the presence of the Email header", "if", "'email'", "not", "in", "metadata", ".", "keys", "(", ")", ":", "try", ":", "metadata", "[", "'email'", "]", "=", "generator", ".", "settings", ".", "get", "(", "'AUTHOR_EMAIL'", ")", "except", ":", "pass", "## Add the Libravatar URL", "if", "metadata", "[", "'email'", "]", ":", "## Compose URL using the MD5 hash", "## (the ascii encoding is necessary for Python3)", "email", "=", "metadata", "[", "'email'", "]", ".", "lower", "(", ")", ".", "encode", "(", "'ascii'", ")", "md5", "=", "hashlib", ".", "md5", "(", "email", ")", ".", "hexdigest", "(", ")", "url", "=", "'http://cdn.libravatar.org/avatar/'", "+", "md5", "## Add the optional \"missing picture\" option", "if", "missing", "or", "size", ":", "url", "=", "url", "+", "'?'", "if", "missing", ":", "url", "=", "url", "+", "'d='", "+", "missing", "if", "size", ":", "url", "=", "url", "+", "'&'", "if", "size", ":", "url", "=", "url", "+", "'s='", "+", "str", "(", "size", ")", "## Add URL to the article's metadata", "metadata", "[", "'author_libravatar'", "]", "=", "url" ]
Article generator connector for the Libravatar plugin
[ "Article", "generator", "connector", "for", "the", "Libravatar", "plugin" ]
python
train
33.545455
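The URL construction is easy to check in isolation: lowercase the address, MD5-hash its ASCII bytes, then append the query options. A worked sketch with a hypothetical address:

import hashlib

email = "Author@Example.com".lower().encode("ascii")  # hypothetical address
md5 = hashlib.md5(email).hexdigest()
print("http://cdn.libravatar.org/avatar/" + md5 + "?d=mm&s=80")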
pywavefront/PyWavefront
pywavefront/mesh.py
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/mesh.py#L61-L66
def add_material(self, material): """Add a material to the mesh, IF it's not already present.""" if self.has_material(material): return self.materials.append(material)
[ "def", "add_material", "(", "self", ",", "material", ")", ":", "if", "self", ".", "has_material", "(", "material", ")", ":", "return", "self", ".", "materials", ".", "append", "(", "material", ")" ]
Add a material to the mesh, IF it's not already present.
[ "Add", "a", "material", "to", "the", "mesh", "IF", "it", "s", "not", "already", "present", "." ]
python
train
33.166667
mapillary/mapillary_tools
mapillary_tools/exif_write.py
https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_write.py#L57-L59
def add_image_history(self, data): """Add arbitrary string to ImageHistory tag.""" self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data)
[ "def", "add_image_history", "(", "self", ",", "data", ")", ":", "self", ".", "_ef", "[", "'0th'", "]", "[", "piexif", ".", "ImageIFD", ".", "ImageHistory", "]", "=", "json", ".", "dumps", "(", "data", ")" ]
Add arbitrary string to ImageHistory tag.
[ "Add", "arbitrary", "string", "to", "ImageHistory", "tag", "." ]
python
train
53.666667
luckydonald/pytgbot
code_generation/code_generator_template.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/code_generator_template.py#L52-L82
def clazz(clazz, parent_clazz, description, link, params_string, init_super_args=None): """ Live template for pycharm: y = clazz(clazz="$clazz$", parent_clazz="$parent$", description="$description$", link="$lnk$", params_string="$first_param$") """ variables_needed = [] variables_optional = [] imports = set() for param in params_string.split("\n"): variable = parse_param_types(param) # any variable.types has always_is_value => length must be 1. assert(not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1) if variable.optional: variables_optional.append(variable) else: variables_needed.append(variable) # end if imports.update(variable.all_imports) # end for imports = list(imports) imports.sort() if isinstance(parent_clazz, str): parent_clazz = to_type(parent_clazz, "parent class") assert isinstance(parent_clazz, Type) clazz_object = Clazz(imports=imports, clazz=clazz, parent_clazz=parent_clazz, link=link, description=description, parameters=variables_needed, keywords=variables_optional ) return clazz_object
[ "def", "clazz", "(", "clazz", ",", "parent_clazz", ",", "description", ",", "link", ",", "params_string", ",", "init_super_args", "=", "None", ")", ":", "variables_needed", "=", "[", "]", "variables_optional", "=", "[", "]", "imports", "=", "set", "(", ")", "for", "param", "in", "params_string", ".", "split", "(", "\"\\n\"", ")", ":", "variable", "=", "parse_param_types", "(", "param", ")", "# any variable.types has always_is_value => length must be 1.", "assert", "(", "not", "any", "(", "[", "type_", ".", "always_is_value", "is", "not", "None", "for", "type_", "in", "variable", ".", "types", "]", ")", "or", "len", "(", "variable", ".", "types", ")", "==", "1", ")", "if", "variable", ".", "optional", ":", "variables_optional", ".", "append", "(", "variable", ")", "else", ":", "variables_needed", ".", "append", "(", "variable", ")", "# end if", "imports", ".", "update", "(", "variable", ".", "all_imports", ")", "# end for", "imports", "=", "list", "(", "imports", ")", "imports", ".", "sort", "(", ")", "if", "isinstance", "(", "parent_clazz", ",", "str", ")", ":", "parent_clazz", "=", "to_type", "(", "parent_clazz", ",", "\"parent class\"", ")", "assert", "isinstance", "(", "parent_clazz", ",", "Type", ")", "clazz_object", "=", "Clazz", "(", "imports", "=", "imports", ",", "clazz", "=", "clazz", ",", "parent_clazz", "=", "parent_clazz", ",", "link", "=", "link", ",", "description", "=", "description", ",", "parameters", "=", "variables_needed", ",", "keywords", "=", "variables_optional", ")", "return", "clazz_object" ]
Live template for pycharm: y = clazz(clazz="$clazz$", parent_clazz="$parent$", description="$description$", link="$lnk$", params_string="$first_param$")
[ "Live", "template", "for", "pycharm", ":" ]
python
train
38.967742
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/util.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/util.py#L595-L626
def transplant_func(func, module): """ Make a function imported from module A appear as if it is located in module B. >>> from pprint import pprint >>> pprint.__module__ 'pprint' >>> pp = transplant_func(pprint, __name__) >>> pp.__module__ 'nose.util' The original function is not modified. >>> pprint.__module__ 'pprint' Calling the transplanted function calls the original. >>> pp([1, 2]) [1, 2] >>> pprint([1,2]) [1, 2] """ from nose.tools import make_decorator def newfunc(*arg, **kw): return func(*arg, **kw) newfunc = make_decorator(func)(newfunc) newfunc.__module__ = module return newfunc
[ "def", "transplant_func", "(", "func", ",", "module", ")", ":", "from", "nose", ".", "tools", "import", "make_decorator", "def", "newfunc", "(", "*", "arg", ",", "*", "*", "kw", ")", ":", "return", "func", "(", "*", "arg", ",", "*", "*", "kw", ")", "newfunc", "=", "make_decorator", "(", "func", ")", "(", "newfunc", ")", "newfunc", ".", "__module__", "=", "module", "return", "newfunc" ]
Make a function imported from module A appear as if it is located in module B. >>> from pprint import pprint >>> pprint.__module__ 'pprint' >>> pp = transplant_func(pprint, __name__) >>> pp.__module__ 'nose.util' The original function is not modified. >>> pprint.__module__ 'pprint' Calling the transplanted function calls the original. >>> pp([1, 2]) [1, 2] >>> pprint([1,2]) [1, 2]
[ "Make", "a", "function", "imported", "from", "module", "A", "appear", "as", "if", "it", "is", "located", "in", "module", "B", "." ]
python
test
21
common-workflow-language/cwltool
cwltool/utils.py
https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/utils.py#L233-L238
def random_outdir(): # type: () -> Text """ Return the random directory name chosen to use for tool / workflow output """ # compute this once and store it as a function attribute - each subsequent call will return the same value if not hasattr(random_outdir, 'outdir'): random_outdir.outdir = '/' + ''.join([random.choice(string.ascii_letters) for _ in range(6)]) # type: ignore # nosec return random_outdir.outdir
[ "def", "random_outdir", "(", ")", ":", "# type: () -> Text", "# compute this once and store it as a function attribute - each subsequent call will return the same value", "if", "not", "hasattr", "(", "random_outdir", ",", "'outdir'", ")", ":", "random_outdir", ".", "outdir", "=", "'/'", "+", "''", ".", "join", "(", "[", "random", ".", "choice", "(", "string", ".", "ascii_letters", ")", "for", "_", "in", "range", "(", "6", ")", "]", ")", "# type: ignore # nosec", "return", "random_outdir", ".", "outdir" ]
Return the random directory name chosen to use for tool / workflow output
[ "Return", "the", "random", "directory", "name", "chosen", "to", "use", "for", "tool", "/", "workflow", "output" ]
python
train
72.666667
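The function-attribute trick generalizes to any compute-once value. A minimal standalone sketch of the same memoization pattern:

import random
import string

def session_token():
    # compute once, then cache the result on the function object itself
    if not hasattr(session_token, "token"):
        session_token.token = "".join(
            random.choice(string.ascii_letters) for _ in range(6))
    return session_token.token

assert session_token() == session_token()  # identical on every call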
materialsproject/pymatgen
pymatgen/analysis/chemenv/coordination_environments/voronoi.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/voronoi.py#L750-L768
def from_dict(cls, d): """ Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using the as_dict method. :param d: dict representation of the VoronoiContainer object :return: VoronoiContainer object """ structure = Structure.from_dict(d['structure']) voronoi_list2 = from_bson_voronoi_list2(d['bson_nb_voro_list2'], structure) maximum_distance_factor = d['maximum_distance_factor'] if 'maximum_distance_factor' in d else None minimum_angle_factor = d['minimum_angle_factor'] if 'minimum_angle_factor' in d else None return cls(structure=structure, voronoi_list2=voronoi_list2, # neighbors_lists=neighbors_lists, normalized_angle_tolerance=d['normalized_angle_tolerance'], normalized_distance_tolerance=d['normalized_distance_tolerance'], additional_conditions=d['additional_conditions'], valences=d['valences'], maximum_distance_factor=maximum_distance_factor, minimum_angle_factor=minimum_angle_factor)
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "structure", "=", "Structure", ".", "from_dict", "(", "d", "[", "'structure'", "]", ")", "voronoi_list2", "=", "from_bson_voronoi_list2", "(", "d", "[", "'bson_nb_voro_list2'", "]", ",", "structure", ")", "maximum_distance_factor", "=", "d", "[", "'maximum_distance_factor'", "]", "if", "'maximum_distance_factor'", "in", "d", "else", "None", "minimum_angle_factor", "=", "d", "[", "'minimum_angle_factor'", "]", "if", "'minimum_angle_factor'", "in", "d", "else", "None", "return", "cls", "(", "structure", "=", "structure", ",", "voronoi_list2", "=", "voronoi_list2", ",", "# neighbors_lists=neighbors_lists,", "normalized_angle_tolerance", "=", "d", "[", "'normalized_angle_tolerance'", "]", ",", "normalized_distance_tolerance", "=", "d", "[", "'normalized_distance_tolerance'", "]", ",", "additional_conditions", "=", "d", "[", "'additional_conditions'", "]", ",", "valences", "=", "d", "[", "'valences'", "]", ",", "maximum_distance_factor", "=", "maximum_distance_factor", ",", "minimum_angle_factor", "=", "minimum_angle_factor", ")" ]
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using the as_dict method. :param d: dict representation of the VoronoiContainer object :return: VoronoiContainer object
[ "Reconstructs", "the", "VoronoiContainer", "object", "from", "a", "dict", "representation", "of", "the", "VoronoiContainer", "created", "using", "the", "as_dict", "method", ".", ":", "param", "d", ":", "dict", "representation", "of", "the", "VoronoiContainer", "object", ":", "return", ":", "VoronoiContainer", "object" ]
python
train
60.736842
PythonicNinja/pydrill
pydrill/client/__init__.py
https://github.com/PythonicNinja/pydrill/blob/0713e78c84d44cd438018e4ba1588a8e242f78c4/pydrill/client/__init__.py#L193-L211
def storage_update(self, name, config, timeout=10): """ Create or update a storage plugin configuration. :param name: The name of the storage plugin configuration to create or update. :param config: Overwrites the existing configuration if there is any, and therefore, must include all required attributes and definitions. :param timeout: int :return: pydrill.client.Result """ result = Result(*self.perform_request(**{ 'method': 'POST', 'url': '/storage/{0}.json'.format(name), 'body': config, 'params': { 'request_timeout': timeout } })) return result
[ "def", "storage_update", "(", "self", ",", "name", ",", "config", ",", "timeout", "=", "10", ")", ":", "result", "=", "Result", "(", "*", "self", ".", "perform_request", "(", "*", "*", "{", "'method'", ":", "'POST'", ",", "'url'", ":", "'/storage/{0}.json'", ".", "format", "(", "name", ")", ",", "'body'", ":", "config", ",", "'params'", ":", "{", "'request_timeout'", ":", "timeout", "}", "}", ")", ")", "return", "result" ]
Create or update a storage plugin configuration. :param name: The name of the storage plugin configuration to create or update. :param config: Overwrites the existing configuration if there is any, and therefore, must include all required attributes and definitions. :param timeout: int :return: pydrill.client.Result
[ "Create", "or", "update", "a", "storage", "plugin", "configuration", "." ]
python
train
36.842105
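A hedged usage sketch (host, port, and the plugin payload are illustrative; PyDrill is the client class this package exposes):

from pydrill.client import PyDrill

drill = PyDrill(host="localhost", port=8047)
config = {"type": "file", "enabled": True}  # hypothetical minimal plugin config
result = drill.storage_update("myplugin", config, timeout=10)
print(result)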
jgorset/django-respite
respite/utils/parsers.py
https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/utils/parsers.py#L31-L62
def parse_http_accept_header(header): """ Return a list of content types listed in the HTTP Accept header ordered by quality. :param header: A string describing the contents of the HTTP Accept header. """ components = [item.strip() for item in header.split(',')] l = [] for component in components: if ';' in component: subcomponents = [item.strip() for item in component.split(';')] l.append( ( subcomponents[0], # eg. 'text/html' subcomponents[1][2:] # eg. 'q=0.9' ) ) else: l.append((component, '1')) l.sort( key = lambda i: i[1], reverse = True ) content_types = [] for i in l: content_types.append(i[0]) return content_types
[ "def", "parse_http_accept_header", "(", "header", ")", ":", "components", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "header", ".", "split", "(", "','", ")", "]", "l", "=", "[", "]", "for", "component", "in", "components", ":", "if", "';'", "in", "component", ":", "subcomponents", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "component", ".", "split", "(", "';'", ")", "]", "l", ".", "append", "(", "(", "subcomponents", "[", "0", "]", ",", "# eg. 'text/html'", "subcomponents", "[", "1", "]", "[", "2", ":", "]", "# eg. 'q=0.9'", ")", ")", "else", ":", "l", ".", "append", "(", "(", "component", ",", "'1'", ")", ")", "l", ".", "sort", "(", "key", "=", "lambda", "i", ":", "i", "[", "1", "]", ",", "reverse", "=", "True", ")", "content_types", "=", "[", "]", "for", "i", "in", "l", ":", "content_types", ".", "append", "(", "i", "[", "0", "]", ")", "return", "content_types" ]
Return a list of content types listed in the HTTP Accept header ordered by quality. :param header: A string describing the contents of the HTTP Accept header.
[ "Return", "a", "list", "of", "content", "types", "listed", "in", "the", "HTTP", "Accept", "header", "ordered", "by", "quality", "." ]
python
train
25.46875
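A worked example: a component without a q parameter defaults to quality '1', so it sorts ahead of the explicit q=0.9 entry. Note the sort compares quality values as strings, which behaves correctly for conventionally formatted q-values:

header = "text/html;q=0.9, application/json"
print(parse_http_accept_header(header))
# -> ['application/json', 'text/html']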
MisterY/gnucash-portfolio
gnucash_portfolio/currencies.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/currencies.py#L132-L173
def import_fx_rates(self, rates: List[PriceModel]): """ Imports the given prices into database. Write operation! """ have_new_rates = False base_currency = self.get_default_currency() for rate in rates: assert isinstance(rate, PriceModel) currency = self.get_by_symbol(rate.symbol) amount = rate.value # Do not import duplicate prices. # todo: if the price differs, update it! # exists_query = exists(rates_query) has_rate = currency.prices.filter(Price.date == rate.datetime.date()).first() # has_rate = ( # self.book.session.query(Price) # .filter(Price.date == rate.date.date()) # .filter(Price.currency == currency) # ) if not has_rate: log(INFO, "Creating entry for %s, %s, %s, %s", base_currency.mnemonic, currency.mnemonic, rate.datetime.date(), amount) # Save the price in the exchange currency, not the default. # Invert the rate in that case. inverted_rate = 1 / amount inverted_rate = inverted_rate.quantize(Decimal('.00000000')) price = Price(commodity=currency, currency=base_currency, date=rate.datetime.date(), value=str(inverted_rate)) have_new_rates = True # Save the book after the prices have been created. if have_new_rates: log(INFO, "Saving new prices...") self.book.flush() self.book.save() else: log(INFO, "No prices imported.")
[ "def", "import_fx_rates", "(", "self", ",", "rates", ":", "List", "[", "PriceModel", "]", ")", ":", "have_new_rates", "=", "False", "base_currency", "=", "self", ".", "get_default_currency", "(", ")", "for", "rate", "in", "rates", ":", "assert", "isinstance", "(", "rate", ",", "PriceModel", ")", "currency", "=", "self", ".", "get_by_symbol", "(", "rate", ".", "symbol", ")", "amount", "=", "rate", ".", "value", "# Do not import duplicate prices.", "# todo: if the price differs, update it!", "# exists_query = exists(rates_query)", "has_rate", "=", "currency", ".", "prices", ".", "filter", "(", "Price", ".", "date", "==", "rate", ".", "datetime", ".", "date", "(", ")", ")", ".", "first", "(", ")", "# has_rate = (", "# self.book.session.query(Price)", "# .filter(Price.date == rate.date.date())", "# .filter(Price.currency == currency)", "# )", "if", "not", "has_rate", ":", "log", "(", "INFO", ",", "\"Creating entry for %s, %s, %s, %s\"", ",", "base_currency", ".", "mnemonic", ",", "currency", ".", "mnemonic", ",", "rate", ".", "datetime", ".", "date", "(", ")", ",", "amount", ")", "# Save the price in the exchange currency, not the default.", "# Invert the rate in that case.", "inverted_rate", "=", "1", "/", "amount", "inverted_rate", "=", "inverted_rate", ".", "quantize", "(", "Decimal", "(", "'.00000000'", ")", ")", "price", "=", "Price", "(", "commodity", "=", "currency", ",", "currency", "=", "base_currency", ",", "date", "=", "rate", ".", "datetime", ".", "date", "(", ")", ",", "value", "=", "str", "(", "inverted_rate", ")", ")", "have_new_rates", "=", "True", "# Save the book after the prices have been created.", "if", "have_new_rates", ":", "log", "(", "INFO", ",", "\"Saving new prices...\"", ")", "self", ".", "book", ".", "flush", "(", ")", "self", ".", "book", ".", "save", "(", ")", "else", ":", "log", "(", "INFO", ",", "\"No prices imported.\"", ")" ]
Imports the given prices into database. Write operation!
[ "Imports", "the", "given", "prices", "into", "database", ".", "Write", "operation!" ]
python
train
40.738095
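The inversion is the subtle step: the price is stored in the exchange currency, so each rate is flipped and quantized to eight decimal places. A worked example with a hypothetical quote:

from decimal import Decimal

amount = Decimal("1.1050")  # hypothetical rate in the base currency
inverted_rate = (1 / amount).quantize(Decimal(".00000000"))
print(inverted_rate)  # 0.90497738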
5monkeys/django-bananas
bananas/__init__.py
https://github.com/5monkeys/django-bananas/blob/cfd318c737f6c4580036c13d2acf32bca96654bf/bananas/__init__.py#L4-L24
def get_version(version=None): """Derives a PEP386-compliant version number from VERSION.""" if version is None: version = VERSION assert len(version) == 5 assert version[3] in ("alpha", "beta", "rc", "final") # Now build the two parts of the version number: # main = X.Y[.Z] # sub = .devN - for pre-alpha releases # | {a|b|c}N - for alpha, beta and rc releases parts = 2 if version[2] == 0 else 3 main = ".".join(str(x) for x in version[:parts]) sub = "" if version[3] != "final": mapping = {"alpha": "a", "beta": "b", "rc": "c"} sub = mapping[version[3]] + str(version[4]) return main + sub
[ "def", "get_version", "(", "version", "=", "None", ")", ":", "if", "version", "is", "None", ":", "version", "=", "VERSION", "assert", "len", "(", "version", ")", "==", "5", "assert", "version", "[", "3", "]", "in", "(", "\"alpha\"", ",", "\"beta\"", ",", "\"rc\"", ",", "\"final\"", ")", "# Now build the two parts of the version number:", "# main = X.Y[.Z]", "# sub = .devN - for pre-alpha releases", "# | {a|b|c}N - for alpha, beta and rc releases", "parts", "=", "2", "if", "version", "[", "2", "]", "==", "0", "else", "3", "main", "=", "\".\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "version", "[", ":", "parts", "]", ")", "sub", "=", "\"\"", "if", "version", "[", "3", "]", "!=", "\"final\"", ":", "mapping", "=", "{", "\"alpha\"", ":", "\"a\"", ",", "\"beta\"", ":", "\"b\"", ",", "\"rc\"", ":", "\"c\"", "}", "sub", "=", "mapping", "[", "version", "[", "3", "]", "]", "+", "str", "(", "version", "[", "4", "]", ")", "return", "main", "+", "sub" ]
Derives a PEP386-compliant version number from VERSION.
[ "Derives", "a", "PEP386", "-", "compliant", "version", "number", "from", "VERSION", "." ]
python
test
31.238095
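Two worked examples of the mapping (Z is dropped when it is 0, and alpha/beta/rc map to a/b/c plus the serial number):

print(get_version((1, 4, 0, "final", 0)))  # 1.4
print(get_version((1, 4, 2, "beta", 3)))   # 1.4.2b3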
amperser/proselint
proselint/checks/mixed_metaphors/misc.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/mixed_metaphors/misc.py#L9-L27
def check_bottleneck(text): """Avoid mixing metaphors about bottles and their necks. source: Sir Ernest Gowers source_url: http://bit.ly/1CQPH61 """ err = "mixed_metaphors.misc.bottleneck" msg = u"Mixed metaphor — bottles with big necks are easy to pass through." list = [ "biggest bottleneck", "big bottleneck", "large bottleneck", "largest bottleneck", "world-wide bottleneck", "huge bottleneck", "massive bottleneck", ] return existence_check(text, list, err, msg, max_errors=1)
[ "def", "check_bottleneck", "(", "text", ")", ":", "err", "=", "\"mixed_metaphors.misc.bottleneck\"", "msg", "=", "u\"Mixed metaphor — bottles with big necks are easy to pass through.\"", "list", "=", "[", "\"biggest bottleneck\"", ",", "\"big bottleneck\"", ",", "\"large bottleneck\"", ",", "\"largest bottleneck\"", ",", "\"world-wide bottleneck\"", ",", "\"huge bottleneck\"", ",", "\"massive bottleneck\"", ",", "]", "return", "existence_check", "(", "text", ",", "list", ",", "err", ",", "msg", ",", "max_errors", "=", "1", ")" ]
Avoid mixing metaphors about bottles and their necks. source: Sir Ernest Gowers source_url: http://bit.ly/1CQPH61
[ "Avoid", "mixing", "metaphors", "about", "bottles", "and", "their", "necks", "." ]
python
train
29.526316
mohamedattahri/PyXMLi
pyxmli/__init__.py
https://github.com/mohamedattahri/PyXMLi/blob/a81a245be822d62f1a20c734ca14b42c786ae81e/pyxmli/__init__.py#L423-L441
def to_xml(self, name="address"): ''' Returns a DOM Element containing the XML representation of the address. @return:Element ''' for n, v in {"street_address": self.street_address, "city": self.city, "country": self.country}.items(): if is_empty_or_none(v): raise ValueError("'%s' attribute cannot be empty or None." % n) doc = Document() root = doc.createElement(name) self._create_text_node(root, "streetAddress", self.street_address, True) self._create_text_node(root, "city", self.city, True) self._create_text_node(root, "zipcode", self.zipcode) self._create_text_node(root, "state", self.state, True) self._create_text_node(root, "country", self.country) return root
[ "def", "to_xml", "(", "self", ",", "name", "=", "\"address\"", ")", ":", "for", "n", ",", "v", "in", "{", "\"street_address\"", ":", "self", ".", "street_address", ",", "\"city\"", ":", "self", ".", "city", ",", "\"country\"", ":", "self", ".", "country", "}", ".", "items", "(", ")", ":", "if", "is_empty_or_none", "(", "v", ")", ":", "raise", "ValueError", "(", "\"'%s' attribute cannot be empty or None.\"", "%", "n", ")", "doc", "=", "Document", "(", ")", "root", "=", "doc", ".", "createElement", "(", "name", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"streetAddress\"", ",", "self", ".", "street_address", ",", "True", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"city\"", ",", "self", ".", "city", ",", "True", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"zipcode\"", ",", "self", ".", "zipcode", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"state\"", ",", "self", ".", "state", ",", "True", ")", "self", ".", "_create_text_node", "(", "root", ",", "\"country\"", ",", "self", ".", "country", ")", "return", "root" ]
Returns a DOM Element containing the XML representation of the address. @return:Element
[ "Returns", "a", "DOM", "Element", "containing", "the", "XML", "representation", "of", "the", "address", "." ]
python
train
43
google/pyringe
pyringe/plugins/gdb_shell.py
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/gdb_shell.py#L38-L48
def StartGdb(self): """Hands control over to a new gdb process.""" if self.inferior.is_running: self.inferior.ShutDownGdb() program_arg = 'program %d ' % self.inferior.pid else: program_arg = '' os.system('gdb ' + program_arg + ' '.join(self.gdb_args)) reset_position = raw_input('Reset debugger position? [y]/n ') if not reset_position or reset_position == 'y' or reset_position == 'yes': self.position = None
[ "def", "StartGdb", "(", "self", ")", ":", "if", "self", ".", "inferior", ".", "is_running", ":", "self", ".", "inferior", ".", "ShutDownGdb", "(", ")", "program_arg", "=", "'program %d '", "%", "self", ".", "inferior", ".", "pid", "else", ":", "program_arg", "=", "''", "os", ".", "system", "(", "'gdb '", "+", "program_arg", "+", "' '", ".", "join", "(", "self", ".", "gdb_args", ")", ")", "reset_position", "=", "raw_input", "(", "'Reset debugger position? [y]/n '", ")", "if", "not", "reset_position", "or", "reset_position", "==", "'y'", "or", "reset_position", "==", "'yes'", ":", "self", ".", "position", "=", "None" ]
Hands control over to a new gdb process.
[ "Hands", "control", "over", "to", "a", "new", "gdb", "process", "." ]
python
train
40.727273
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L393-L399
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Takes the inverse sin of a vertex, Arcsin(vertex) :param input_vertex: the vertex """ return Double(context.jvm_view().ArcSinVertex, label, cast_to_double_vertex(input_vertex))
[ "def", "ArcSin", "(", "input_vertex", ":", "vertex_constructor_param_types", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Double", "(", "context", ".", "jvm_view", "(", ")", ".", "ArcSinVertex", ",", "label", ",", "cast_to_double_vertex", "(", "input_vertex", ")", ")" ]
Takes the inverse sin of a vertex, Arcsin(vertex) :param input_vertex: the vertex
[ "Takes", "the", "inverse", "sin", "of", "a", "vertex", "Arcsin", "(", "vertex", ")", ":", "param", "input_vertex", ":", "the", "vertex" ]
python
train
42
tariqdaouda/pyGeno
pyGeno/tools/Stats.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/Stats.py#L3-L11
def kullback_leibler(p, q) : """Discrete Kullback-Leibler divergence D(P||Q)""" p = np.asarray(p, dtype=float) q = np.asarray(q, dtype=float) if p.shape != q.shape : raise ValueError("p and q must be of the same dimensions") return np.sum(np.where(p > 0, np.log(p / q) * p, 0))
[ "def", "kullback_leibler", "(", "p", ",", "q", ")", ":", "p", "=", "np", ".", "asarray", "(", "p", ",", "dtype", "=", "float", ")", "q", "=", "np", ".", "asarray", "(", "q", ",", "dtype", "=", "float", ")", "if", "p", ".", "shape", "!=", "q", ".", "shape", ":", "raise", "ValueError", "(", "\"p and q must be of the same dimensions\"", ")", "return", "np", ".", "sum", "(", "np", ".", "where", "(", "p", ">", "0", ",", "np", ".", "log", "(", "p", "/", "q", ")", "*", "p", ",", "0", ")", ")" ]
Discrete Kullback-Leibler divergence D(P||Q)
[ "Discrete", "Kullback", "-", "Leibler", "divergence", "D", "(", "P||Q", ")" ]
python
train
31.666667
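A worked example using natural logarithms, as np.log does; the np.where guard contributes 0 for any bin where p is 0:

p = [0.5, 0.5]
q = [0.9, 0.1]
print(kullback_leibler(p, q))
# 0.5*log(0.5/0.9) + 0.5*log(0.5/0.1) ~= 0.5108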
saltstack/salt
salt/modules/boto3_elasticache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_elasticache.py#L675-L687
def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args): ''' Delete a cache security group. Example: .. code-block:: bash salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg ''' return _delete_resource(name, name_param='CacheSecurityGroupName', desc='cache security group', res_type='cache_security_group', region=region, key=key, keyid=keyid, profile=profile, **args)
[ "def", "delete_cache_security_group", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "args", ")", ":", "return", "_delete_resource", "(", "name", ",", "name_param", "=", "'CacheSecurityGroupName'", ",", "desc", "=", "'cache security group'", ",", "res_type", "=", "'cache_security_group'", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "*", "*", "args", ")" ]
Delete a cache security group. Example: .. code-block:: bash salt myminion boto3_elasticache.delete_cache_security_group myelasticachesg
[ "Delete", "a", "cache", "security", "group", "." ]
python
train
39.230769
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L3205-L3212
def JCXZ(cpu, target): """ Jumps short if CX register is 0. :param cpu: current CPU. :param target: destination operand. """ cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.CX == 0, target.read(), cpu.PC)
[ "def", "JCXZ", "(", "cpu", ",", "target", ")", ":", "cpu", ".", "PC", "=", "Operators", ".", "ITEBV", "(", "cpu", ".", "address_bit_size", ",", "cpu", ".", "CX", "==", "0", ",", "target", ".", "read", "(", ")", ",", "cpu", ".", "PC", ")" ]
Jumps short if CX register is 0. :param cpu: current CPU. :param target: destination operand.
[ "Jumps", "short", "if", "CX", "register", "is", "0", "." ]
python
valid
31.125
robin900/gspread-dataframe
gspread_dataframe.py
https://github.com/robin900/gspread-dataframe/blob/b64fef7ec196bfed69362aa35c593f448830a735/gspread_dataframe.py#L118-L135
def get_as_dataframe(worksheet, evaluate_formulas=False, **options): """ Returns the worksheet contents as a DataFrame. :param worksheet: the worksheet. :param evaluate_formulas: if True, get the value of a cell after formula evaluation; otherwise get the formula itself if present. Defaults to False. :param \*\*options: all the options for pandas.io.parsers.TextParser, according to the version of pandas that is installed. (Note: TextParser supports only the default 'python' parser engine, not the C engine.) :returns: pandas.DataFrame """ all_values = _get_all_values(worksheet, evaluate_formulas) return TextParser(all_values, **options).read()
[ "def", "get_as_dataframe", "(", "worksheet", ",", "evaluate_formulas", "=", "False", ",", "*", "*", "options", ")", ":", "all_values", "=", "_get_all_values", "(", "worksheet", ",", "evaluate_formulas", ")", "return", "TextParser", "(", "all_values", ",", "*", "*", "options", ")", ".", "read", "(", ")" ]
Returns the worksheet contents as a DataFrame. :param worksheet: the worksheet. :param evaluate_formulas: if True, get the value of a cell after formula evaluation; otherwise get the formula itself if present. Defaults to False. :param \*\*options: all the options for pandas.io.parsers.TextParser, according to the version of pandas that is installed. (Note: TextParser supports only the default 'python' parser engine, not the C engine.) :returns: pandas.DataFrame
[ "Returns", "the", "worksheet", "contents", "as", "a", "DataFrame", "." ]
python
train
42.888889
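A hedged usage sketch (the auth call and sheet name are hypothetical; keyword options such as parse_dates pass straight through to the pandas TextParser):

import gspread
from gspread_dataframe import get_as_dataframe

gc = gspread.service_account()  # hypothetical auth setup
ws = gc.open("My spreadsheet").sheet1
df = get_as_dataframe(ws, parse_dates=True, header=0)
print(df.head())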
eyeseast/python-tablefu
table_fu/formatting.py
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/formatting.py#L163-L173
def ratio(value, decimal_places=0, failure_string='N/A'):
    """
    Converts a floating point value to an X:1 ratio.
    Number of decimal places set by the `decimal_places` kwarg.
    Default is zero.
    """
    try:
        f = float(value)
    except ValueError:
        return failure_string
    return _saferound(f, decimal_places) + ':1'
[ "def", "ratio", "(", "value", ",", "decimal_places", "=", "0", ",", "failure_string", "=", "'N/A'", ")", ":", "try", ":", "f", "=", "float", "(", "value", ")", "except", "ValueError", ":", "return", "failure_string", "return", "_saferound", "(", "f", ",", "decimal_places", ")", "+", "':1'" ]
Converts a floating point value to an X:1 ratio. Number of decimal places set by the `decimal_places` kwarg. Default is zero.
[ "Converts", "a", "floating", "point", "value", "a", "X", ":", "1", "ratio", ".", "Number", "of", "decimal", "places", "set", "by", "the", "precision", "kwarg", ".", "Default", "is", "one", "." ]
python
train
29.727273
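Expected behavior of ``ratio``, assuming the module-internal ``_saferound`` rounds to the requested number of decimal places and returns a string:

.. code-block:: python

    ratio(3.917)        # -> '4:1'    (zero decimal places by default)
    ratio(3.917, 2)     # -> '3.92:1'
    ratio('missing')    # -> 'N/A'    (non-numeric input hits the fallback)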
klmitch/turnstile
turnstile/database.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/database.py#L126-L172
def limit_update(db, key, limits): """ Safely updates the list of limits in the database. :param db: The database handle. :param key: The key the limits are stored under. :param limits: A list or sequence of limit objects, each understanding the dehydrate() method. The limits list currently in the database will be atomically changed to match the new list. This is done using the pipeline() method. """ # Start by dehydrating all the limits desired = [msgpack.dumps(l.dehydrate()) for l in limits] desired_set = set(desired) # Now, let's update the limits with db.pipeline() as pipe: while True: try: # Watch for changes to the key pipe.watch(key) # Look up the existing limits existing = set(pipe.zrange(key, 0, -1)) # Start the transaction... pipe.multi() # Remove limits we no longer have for lim in existing - desired_set: pipe.zrem(key, lim) # Update or add all our desired limits for idx, lim in enumerate(desired): pipe.zadd(key, (idx + 1) * 10, lim) # Execute the transaction pipe.execute() except redis.WatchError: # Try again... continue else: # We're all done! break
[ "def", "limit_update", "(", "db", ",", "key", ",", "limits", ")", ":", "# Start by dehydrating all the limits", "desired", "=", "[", "msgpack", ".", "dumps", "(", "l", ".", "dehydrate", "(", ")", ")", "for", "l", "in", "limits", "]", "desired_set", "=", "set", "(", "desired", ")", "# Now, let's update the limits", "with", "db", ".", "pipeline", "(", ")", "as", "pipe", ":", "while", "True", ":", "try", ":", "# Watch for changes to the key", "pipe", ".", "watch", "(", "key", ")", "# Look up the existing limits", "existing", "=", "set", "(", "pipe", ".", "zrange", "(", "key", ",", "0", ",", "-", "1", ")", ")", "# Start the transaction...", "pipe", ".", "multi", "(", ")", "# Remove limits we no longer have", "for", "lim", "in", "existing", "-", "desired_set", ":", "pipe", ".", "zrem", "(", "key", ",", "lim", ")", "# Update or add all our desired limits", "for", "idx", ",", "lim", "in", "enumerate", "(", "desired", ")", ":", "pipe", ".", "zadd", "(", "key", ",", "(", "idx", "+", "1", ")", "*", "10", ",", "lim", ")", "# Execute the transaction", "pipe", ".", "execute", "(", ")", "except", "redis", ".", "WatchError", ":", "# Try again...", "continue", "else", ":", "# We're all done!", "break" ]
Safely updates the list of limits in the database. :param db: The database handle. :param key: The key the limits are stored under. :param limits: A list or sequence of limit objects, each understanding the dehydrate() method. The limits list currently in the database will be atomically changed to match the new list. This is done using the pipeline() method.
[ "Safely", "updates", "the", "list", "of", "limits", "in", "the", "database", "." ]
python
train
31
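A minimal call sketch for ``limit_update``; it assumes a redis-py 2.x client (matching the three-argument ``zadd`` used above) and limit objects that implement ``dehydrate()``.

.. code-block:: python

    import redis

    db = redis.StrictRedis(host='localhost', port=6379)  # placeholder connection
    limits = []  # replace with turnstile limit objects exposing dehydrate()
    limit_update(db, 'limits', limits)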
python-rope/rope
rope/base/pyscopes.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/pyscopes.py#L22-L26
def get_name(self, name):
    """Return the `PyName` for `name` defined in this scope"""
    if name not in self.get_names():
        raise exceptions.NameNotFoundError('name %s not found' % name)
    return self.get_names()[name]
[ "def", "get_name", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "get_names", "(", ")", ":", "raise", "exceptions", ".", "NameNotFoundError", "(", "'name %s not found'", "%", "name", ")", "return", "self", ".", "get_names", "(", ")", "[", "name", "]" ]
Return the `PyName` for `name` defined in this scope
[ "Return", "name", "PyName", "defined", "in", "this", "scope" ]
python
train
46.4
klen/muffin-metrics
muffin_metrics.py
https://github.com/klen/muffin-metrics/blob/b62fc25172e3e1e9fc6dc6c8da3170935ee69f01/muffin_metrics.py#L111-L117
def send(self, stat, value, backend=None): """Send stat to backend.""" client = yield from self.client(backend) if not client: return False client.send(stat, value) client.disconnect()
[ "def", "send", "(", "self", ",", "stat", ",", "value", ",", "backend", "=", "None", ")", ":", "client", "=", "yield", "from", "self", ".", "client", "(", "backend", ")", "if", "not", "client", ":", "return", "False", "client", ".", "send", "(", "stat", ",", "value", ")", "client", ".", "disconnect", "(", ")" ]
Send stat to backend.
[ "Send", "stat", "to", "backend", "." ]
python
train
32.857143
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L1843-L1854
def daffpa(): """ Find the previous (backward) array in the current DAF. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html :return: True if an array was found. :rtype: bool """ found = ctypes.c_int() libspice.daffpa_c(ctypes.byref(found)) return bool(found.value)
[ "def", "daffpa", "(", ")", ":", "found", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "daffpa_c", "(", "ctypes", ".", "byref", "(", "found", ")", ")", "return", "bool", "(", "found", ".", "value", ")" ]
Find the previous (backward) array in the current DAF. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/daffpa_c.html :return: True if an array was found. :rtype: bool
[ "Find", "the", "previous", "(", "backward", ")", "array", "in", "the", "current", "DAF", "." ]
python
train
25.833333
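A backward-traversal sketch pairing ``daffpa`` with the matching begin-backward-search call; the DAF file name is a placeholder.

.. code-block:: python

    import spiceypy as spice

    handle = spice.dafopr('example.bsp')   # placeholder DAF file
    spice.dafbbs(handle)                   # begin a backward search
    while spice.daffpa():                  # step to the previous array
        summary = spice.dafgs()            # summary of the current array
    spice.dafcls(handle)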
Yelp/py_zipkin
py_zipkin/encoding/_decoders.py
https://github.com/Yelp/py_zipkin/blob/0944d9a3fb1f1798dbb276694aeed99f2b4283ba/py_zipkin/encoding/_decoders.py#L270-L288
def _write_hex_long(self, data, pos, value): """ Writes an unsigned long value across a byte array. :param data: the buffer to write the value to :type data: bytearray :param pos: the starting position :type pos: int :param value: the value to write :type value: unsigned long """ self._write_hex_byte(data, pos + 0, (value >> 56) & 0xff) self._write_hex_byte(data, pos + 2, (value >> 48) & 0xff) self._write_hex_byte(data, pos + 4, (value >> 40) & 0xff) self._write_hex_byte(data, pos + 6, (value >> 32) & 0xff) self._write_hex_byte(data, pos + 8, (value >> 24) & 0xff) self._write_hex_byte(data, pos + 10, (value >> 16) & 0xff) self._write_hex_byte(data, pos + 12, (value >> 8) & 0xff) self._write_hex_byte(data, pos + 14, (value & 0xff))
[ "def", "_write_hex_long", "(", "self", ",", "data", ",", "pos", ",", "value", ")", ":", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "0", ",", "(", "value", ">>", "56", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "2", ",", "(", "value", ">>", "48", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "4", ",", "(", "value", ">>", "40", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "6", ",", "(", "value", ">>", "32", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "8", ",", "(", "value", ">>", "24", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "10", ",", "(", "value", ">>", "16", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "12", ",", "(", "value", ">>", "8", ")", "&", "0xff", ")", "self", ".", "_write_hex_byte", "(", "data", ",", "pos", "+", "14", ",", "(", "value", "&", "0xff", ")", ")" ]
Writes an unsigned long value across a byte array. :param data: the buffer to write the value to :type data: bytearray :param pos: the starting position :type pos: int :param value: the value to write :type value: unsigned long
[ "Writes", "an", "unsigned", "long", "value", "across", "a", "byte", "array", "." ]
python
test
45.210526
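For intuition, the eight calls above lay down the same 16 lowercase hex characters that a format string would produce for a 64-bit value; a standalone equivalent:

.. code-block:: python

    def hex_long(value):
        # Same layout the method writes: 16 hex chars, most significant first.
        return '{0:016x}'.format(value & 0xffffffffffffffff)

    assert hex_long(0x1234) == '0000000000001234'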
google/openhtf
openhtf/plugs/usb/filesync_service.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L198-L203
def recv(self, filename, dest_file, timeout=None): """Retrieve a file from the device into the file-like dest_file.""" transport = DataFilesyncTransport(self.stream) transport.write_data('RECV', filename, timeout) for data_msg in transport.read_until_done('DATA', timeout): dest_file.write(data_msg.data)
[ "def", "recv", "(", "self", ",", "filename", ",", "dest_file", ",", "timeout", "=", "None", ")", ":", "transport", "=", "DataFilesyncTransport", "(", "self", ".", "stream", ")", "transport", ".", "write_data", "(", "'RECV'", ",", "filename", ",", "timeout", ")", "for", "data_msg", "in", "transport", ".", "read_until_done", "(", "'DATA'", ",", "timeout", ")", ":", "dest_file", ".", "write", "(", "data_msg", ".", "data", ")" ]
Retrieve a file from the device into the file-like dest_file.
[ "Retrieve", "a", "file", "from", "the", "device", "into", "the", "file", "-", "like", "dest_file", "." ]
python
train
53.5
veeti/decent
decent/validators.py
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L212-L231
def Range(min=None, max=None, min_message="Must be at least {min}", max_message="Must be at most {max}"): """ Creates a validator that checks if the given numeric value is in the specified range, inclusive. Accepts values specified by ``numbers.Number`` only, excluding booleans. The error messages raised can be customized with ``min_message`` and ``max_message``. The ``min`` and ``max`` arguments are formatted. """ @wraps(Range) def built(value): if not isinstance(value, numbers.Number) or isinstance(value, bool): raise Error("Not a number") if min is not None and min > value: raise Error(min_message.format(min=min, max=max)) if max is not None and value > max: raise Error(max_message.format(min=min, max=max)) return value return built
[ "def", "Range", "(", "min", "=", "None", ",", "max", "=", "None", ",", "min_message", "=", "\"Must be at least {min}\"", ",", "max_message", "=", "\"Must be at most {max}\"", ")", ":", "@", "wraps", "(", "Range", ")", "def", "built", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "numbers", ".", "Number", ")", "or", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "Error", "(", "\"Not a number\"", ")", "if", "min", "is", "not", "None", "and", "min", ">", "value", ":", "raise", "Error", "(", "min_message", ".", "format", "(", "min", "=", "min", ",", "max", "=", "max", ")", ")", "if", "max", "is", "not", "None", "and", "value", ">", "max", ":", "raise", "Error", "(", "max_message", ".", "format", "(", "min", "=", "min", ",", "max", "=", "max", ")", ")", "return", "value", "return", "built" ]
Creates a validator that checks if the given numeric value is in the specified range, inclusive. Accepts values specified by ``numbers.Number`` only, excluding booleans. The error messages raised can be customized with ``min_message`` and ``max_message``. The ``min`` and ``max`` arguments are formatted.
[ "Creates", "a", "validator", "that", "checks", "if", "the", "given", "numeric", "value", "is", "in", "the", "specified", "range", "inclusive", "." ]
python
train
41.8
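Usage sketch for ``Range``; ``Error`` is the validation error type this library raises.

.. code-block:: python

    check = Range(min=0, max=10)
    check(5)       # -> 5
    check(11)      # raises Error('Must be at most 10')
    check(True)    # raises Error('Not a number'); booleans are rejected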
edx/edx-submissions
submissions/api.py
https://github.com/edx/edx-submissions/blob/8d531ca25f7c2886dfcb1bb8febe0910e1433ca2/submissions/api.py#L759-L824
def reset_score(student_id, course_id, item_id, clear_state=False, emit_signal=True):
    """
    Reset scores for a specific student on a specific problem.

    Note: this does *not* delete `Score` models from the database,
    since these are immutable. It simply creates a new score with
    the "reset" flag set to True.

    Args:
        student_id (unicode): The ID of the student for whom to reset scores.
        course_id (unicode): The ID of the course containing the item to reset.
        item_id (unicode): The ID of the item for which to reset scores.
        clear_state (bool): If True, will appear to delete any submissions
            associated with the specified StudentItem

    Returns:
        None

    Raises:
        SubmissionInternalError: An unexpected error occurred while resetting scores.

    """
    # Retrieve the student item
    try:
        student_item = StudentItem.objects.get(
            student_id=student_id,
            course_id=course_id,
            item_id=item_id
        )
    except StudentItem.DoesNotExist:
        # If there is no student item, then there is no score to reset,
        # so we can return immediately.
        return

    # Create a "reset" score
    try:
        score = Score.create_reset_score(student_item)
        if emit_signal:
            # Send a signal out to any listeners who are waiting for scoring events.
            score_reset.send(
                sender=None,
                anonymous_user_id=student_id,
                course_id=course_id,
                item_id=item_id,
                created_at=score.created_at,
            )

        if clear_state:
            for sub in student_item.submission_set.all():
                # soft-delete the Submission
                sub.status = Submission.DELETED
                sub.save(update_fields=["status"])

                # Also clear out cached values
                cache_key = Submission.get_cache_key(sub.uuid)
                cache.delete(cache_key)

    except DatabaseError:
        msg = (
            u"Error occurred while resetting scores for"
            u" item {item_id} in course {course_id} for student {student_id}"
        ).format(item_id=item_id, course_id=course_id, student_id=student_id)
        logger.exception(msg)
        raise SubmissionInternalError(msg)
    else:
        msg = u"Score reset for item {item_id} in course {course_id} for student {student_id}".format(
            item_id=item_id,
            course_id=course_id,
            student_id=student_id
        )
        logger.info(msg)
[ "def", "reset_score", "(", "student_id", ",", "course_id", ",", "item_id", ",", "clear_state", "=", "False", ",", "emit_signal", "=", "True", ")", ":", "# Retrieve the student item", "try", ":", "student_item", "=", "StudentItem", ".", "objects", ".", "get", "(", "student_id", "=", "student_id", ",", "course_id", "=", "course_id", ",", "item_id", "=", "item_id", ")", "except", "StudentItem", ".", "DoesNotExist", ":", "# If there is no student item, then there is no score to reset,", "# so we can return immediately.", "return", "# Create a \"reset\" score", "try", ":", "score", "=", "Score", ".", "create_reset_score", "(", "student_item", ")", "if", "emit_signal", ":", "# Send a signal out to any listeners who are waiting for scoring events.", "score_reset", ".", "send", "(", "sender", "=", "None", ",", "anonymous_user_id", "=", "student_id", ",", "course_id", "=", "course_id", ",", "item_id", "=", "item_id", ",", "created_at", "=", "score", ".", "created_at", ",", ")", "if", "clear_state", ":", "for", "sub", "in", "student_item", ".", "submission_set", ".", "all", "(", ")", ":", "# soft-delete the Submission", "sub", ".", "status", "=", "Submission", ".", "DELETED", "sub", ".", "save", "(", "update_fields", "=", "[", "\"status\"", "]", ")", "# Also clear out cached values", "cache_key", "=", "Submission", ".", "get_cache_key", "(", "sub", ".", "uuid", ")", "cache", ".", "delete", "(", "cache_key", ")", "except", "DatabaseError", ":", "msg", "=", "(", "u\"Error occurred while reseting scores for\"", "u\" item {item_id} in course {course_id} for student {student_id}\"", ")", ".", "format", "(", "item_id", "=", "item_id", ",", "course_id", "=", "course_id", ",", "student_id", "=", "student_id", ")", "logger", ".", "exception", "(", "msg", ")", "raise", "SubmissionInternalError", "(", "msg", ")", "else", ":", "msg", "=", "u\"Score reset for item {item_id} in course {course_id} for student {student_id}\"", ".", "format", "(", "item_id", "=", "item_id", ",", "course_id", "=", "course_id", ",", "student_id", "=", "student_id", ")", "logger", ".", "info", "(", "msg", ")" ]
Reset scores for a specific student on a specific problem. Note: this does *not* delete `Score` models from the database, since these are immutable. It simply creates a new score with the "reset" flag set to True. Args: student_id (unicode): The ID of the student for whom to reset scores. course_id (unicode): The ID of the course containing the item to reset. item_id (unicode): The ID of the item for which to reset scores. clear_state (bool): If True, will appear to delete any submissions associated with the specified StudentItem Returns: None Raises: SubmissionInternalError: An unexpected error occurred while resetting scores.
[ "Reset", "scores", "for", "a", "specific", "student", "on", "a", "specific", "problem", "." ]
python
train
37.151515
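A minimal call sketch for ``reset_score``, assuming the module is importable as ``submissions.api``; every identifier below is a placeholder that must match an existing StudentItem.

.. code-block:: python

    from submissions import api

    api.reset_score(
        student_id='anonymous-user-1',          # placeholder
        course_id='course-v1:edX+Demo+2024',    # placeholder
        item_id='block-v1:item-1',              # placeholder
        clear_state=True,  # also soft-delete the student's submissions
    )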
dcos/shakedown
shakedown/dcos/service.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/service.py#L540-L568
def tasks_missing_predicate( service_name, old_task_ids, task_predicate=None ): """ Returns whether any of old_task_ids are no longer present :param service_name: the service name :type service_name: str :param old_task_ids: list of original task ids as returned by get_service_task_ids :type old_task_ids: [str] :param task_predicate: filter to use when searching for tasks :type task_predicate: func :return: True if any of old_task_ids are no longer present in the service :rtype: bool """ try: task_ids = get_service_task_ids(service_name, task_predicate) except DCOSHTTPException: print('failed to get task ids for service {}'.format(service_name)) task_ids = [] print('checking whether old tasks in "{}" are missing:\n- old tasks: {}\n- current tasks: {}'.format( service_name, old_task_ids, task_ids)) for id in old_task_ids: if id not in task_ids: return True # an old task was not present return False
[ "def", "tasks_missing_predicate", "(", "service_name", ",", "old_task_ids", ",", "task_predicate", "=", "None", ")", ":", "try", ":", "task_ids", "=", "get_service_task_ids", "(", "service_name", ",", "task_predicate", ")", "except", "DCOSHTTPException", ":", "print", "(", "'failed to get task ids for service {}'", ".", "format", "(", "service_name", ")", ")", "task_ids", "=", "[", "]", "print", "(", "'checking whether old tasks in \"{}\" are missing:\\n- old tasks: {}\\n- current tasks: {}'", ".", "format", "(", "service_name", ",", "old_task_ids", ",", "task_ids", ")", ")", "for", "id", "in", "old_task_ids", ":", "if", "id", "not", "in", "task_ids", ":", "return", "True", "# an old task was not present", "return", "False" ]
Returns whether any of old_task_ids are no longer present :param service_name: the service name :type service_name: str :param old_task_ids: list of original task ids as returned by get_service_task_ids :type old_task_ids: [str] :param task_predicate: filter to use when searching for tasks :type task_predicate: func :return: True if any of old_task_ids are no longer present in the service :rtype: bool
[ "Returns", "whether", "any", "of", "old_task_ids", "are", "no", "longer", "present" ]
python
train
36.344828
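Typical use of ``tasks_missing_predicate`` captures the task ids before a restart and polls afterwards; the service name is an example.

.. code-block:: python

    old_ids = get_service_task_ids('marathon')
    # ... trigger a restart or replacement of the service's tasks ...
    if tasks_missing_predicate('marathon', old_ids):
        print('at least one original task is gone')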
census-instrumentation/opencensus-python
opencensus/trace/span.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/span.py#L163-L175
def span(self, name='child_span'): """Create a child span for the current span and append it to the child spans list. :type name: str :param name: (Optional) The name of the child span. :rtype: :class: `~opencensus.trace.span.Span` :returns: A child Span to be added to the current span. """ child_span = Span(name, parent_span=self) self._child_spans.append(child_span) return child_span
[ "def", "span", "(", "self", ",", "name", "=", "'child_span'", ")", ":", "child_span", "=", "Span", "(", "name", ",", "parent_span", "=", "self", ")", "self", ".", "_child_spans", ".", "append", "(", "child_span", ")", "return", "child_span" ]
Create a child span for the current span and append it to the child spans list. :type name: str :param name: (Optional) The name of the child span. :rtype: :class: `~opencensus.trace.span.Span` :returns: A child Span to be added to the current span.
[ "Create", "a", "child", "span", "for", "the", "current", "span", "and", "append", "it", "to", "the", "child", "spans", "list", "." ]
python
train
35.230769
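Usage sketch building a two-level span tree with ``span``; the span names are arbitrary.

.. code-block:: python

    root = Span('request')
    child = root.span('db-query')         # appended to root's child list
    grandchild = child.span('row-fetch')  # children are full Spans and nest further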
zeth/inputs
inputs.py
https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L2603-L2607
def _get_data(self, read_size): """Get data from the character device.""" if NIX: return super(Mouse, self)._get_data(read_size) return self._pipe.recv_bytes()
[ "def", "_get_data", "(", "self", ",", "read_size", ")", ":", "if", "NIX", ":", "return", "super", "(", "Mouse", ",", "self", ")", ".", "_get_data", "(", "read_size", ")", "return", "self", ".", "_pipe", ".", "recv_bytes", "(", ")" ]
Get data from the character device.
[ "Get", "data", "from", "the", "character", "device", "." ]
python
train
38.2
vmware/pyvmomi
pyVmomi/SoapAdapter.py
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/SoapAdapter.py#L272-L274
def SerializeFaultDetail(self, val, info):
    """ Serialize an object """
    self._SerializeDataObject(val, info, ' xsi:type="{0}"'.format(val._wsdlName), self.defaultNS)
[ "def", "SerializeFaultDetail", "(", "self", ",", "val", ",", "info", ")", ":", "self", ".", "_SerializeDataObject", "(", "val", ",", "info", ",", "' xsi:typ=\"{1}\"'", ".", "format", "(", "val", ".", "_wsdlName", ")", ",", "self", ".", "defaultNS", ")" ]
Serialize an object
[ "Serialize", "an", "object" ]
python
train
57.666667
dev-pipeline/dev-pipeline-git
lib/devpipeline_git/git.py
https://github.com/dev-pipeline/dev-pipeline-git/blob/b604f1f89402502b8ad858f4f834baa9467ef380/lib/devpipeline_git/git.py#L126-L134
def update(self, repo_dir, **kwargs): """This function updates an existing checkout of source code.""" del kwargs rev = self._args.get("revision") if rev: return [{"args": ["git", "checkout", rev], "cwd": repo_dir}] + _ff_command( rev, repo_dir ) return None
[ "def", "update", "(", "self", ",", "repo_dir", ",", "*", "*", "kwargs", ")", ":", "del", "kwargs", "rev", "=", "self", ".", "_args", ".", "get", "(", "\"revision\"", ")", "if", "rev", ":", "return", "[", "{", "\"args\"", ":", "[", "\"git\"", ",", "\"checkout\"", ",", "rev", "]", ",", "\"cwd\"", ":", "repo_dir", "}", "]", "+", "_ff_command", "(", "rev", ",", "repo_dir", ")", "return", "None" ]
This function updates an existing checkout of source code.
[ "This", "function", "updates", "an", "existing", "checkout", "of", "source", "code", "." ]
python
train
36.666667
materialsproject/pymatgen
pymatgen/analysis/gb/grain.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/gb/grain.py#L1172-L1247
def enum_sigma_cubic(cutoff, r_axis):
    """
    Find all possible sigma values and corresponding rotation angles
    within a sigma value cutoff with known rotation axis in cubic system.
    The algorithm for this code is from the reference Acta Cryst, A40, 108 (1984)

    Args:
        cutoff (integer): the cutoff of sigma values.
        r_axis (list of three integers, e.g. u, v, w):
                the rotation axis of the grain boundary, with the format of [u,v,w].

    Returns:
        sigmas (dict):
                dictionary with keys as the possible integer sigma values
                and values as list of the possible rotation angles to the
                corresponding sigma values.
                e.g. the format as
                {sigma1: [angle11, angle12, ...], sigma2: [angle21, angle22, ...], ...}
                Note: the angles are the rotation angles of one grain with respect
                to the other grain.
                When generating the microstructures of the grain boundary using these
                angles, you need to analyze the symmetry of the structure. Different
                angles may result in equivalent microstructures.
    """
    sigmas = {}
    # make sure gcd(r_axis)==1
    if reduce(gcd, r_axis) != 1:
        r_axis = [int(round(x / reduce(gcd, r_axis))) for x in r_axis]
    # count the number of odds in r_axis
    odd_r = len(list(filter(lambda x: x % 2 == 1, r_axis)))
    # Compute the max n we need to enumerate.
    if odd_r == 3:
        a_max = 4
    elif odd_r == 0:
        a_max = 1
    else:
        a_max = 2
    n_max = int(np.sqrt(cutoff * a_max / sum(np.array(r_axis) ** 2)))
    # enumerate all possible n, m to give possible sigmas within the cutoff.
    for n_loop in range(1, n_max + 1):
        n = n_loop
        m_max = int(np.sqrt(cutoff * a_max - n ** 2 * sum(np.array(r_axis) ** 2)))
        for m in range(0, m_max + 1):
            if gcd(m, n) == 1 or m == 0:
                if m == 0:
                    n = 1
                else:
                    n = n_loop
                # construct the quadruple [m, U,V,W], count the number of odds in
                # quadruple to determine the parameter a, refer to the reference
                quadruple = [m] + [x * n for x in r_axis]
                odd_qua = len(list(filter(lambda x: x % 2 == 1, quadruple)))
                if odd_qua == 4:
                    a = 4
                elif odd_qua == 2:
                    a = 2
                else:
                    a = 1
                sigma = int(round((m ** 2 + n ** 2 * sum(np.array(r_axis) ** 2)) / a))
                if (sigma <= cutoff) and (sigma > 1):
                    if sigma not in list(sigmas.keys()):
                        if m == 0:
                            angle = 180.0
                        else:
                            angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
                                / np.pi * 180
                        sigmas[sigma] = [angle]
                    else:
                        if m == 0:
                            angle = 180.0
                        else:
                            angle = 2 * np.arctan(n * np.sqrt(sum(np.array(r_axis) ** 2)) / m) \
                                / np.pi * 180
                        if angle not in sigmas[sigma]:
                            sigmas[sigma].append(angle)
    return sigmas
[ "def", "enum_sigma_cubic", "(", "cutoff", ",", "r_axis", ")", ":", "sigmas", "=", "{", "}", "# make sure gcd(r_axis)==1", "if", "reduce", "(", "gcd", ",", "r_axis", ")", "!=", "1", ":", "r_axis", "=", "[", "int", "(", "round", "(", "x", "/", "reduce", "(", "gcd", ",", "r_axis", ")", ")", ")", "for", "x", "in", "r_axis", "]", "# count the number of odds in r_axis", "odd_r", "=", "len", "(", "list", "(", "filter", "(", "lambda", "x", ":", "x", "%", "2", "==", "1", ",", "r_axis", ")", ")", ")", "# Compute the max n we need to enumerate.", "if", "odd_r", "==", "3", ":", "a_max", "=", "4", "elif", "odd_r", "==", "0", ":", "a_max", "=", "1", "else", ":", "a_max", "=", "2", "n_max", "=", "int", "(", "np", ".", "sqrt", "(", "cutoff", "*", "a_max", "/", "sum", "(", "np", ".", "array", "(", "r_axis", ")", "**", "2", ")", ")", ")", "# enumerate all possible n, m to give possible sigmas within the cutoff.", "for", "n_loop", "in", "range", "(", "1", ",", "n_max", "+", "1", ")", ":", "n", "=", "n_loop", "m_max", "=", "int", "(", "np", ".", "sqrt", "(", "cutoff", "*", "a_max", "-", "n", "**", "2", "*", "sum", "(", "np", ".", "array", "(", "r_axis", ")", "**", "2", ")", ")", ")", "for", "m", "in", "range", "(", "0", ",", "m_max", "+", "1", ")", ":", "if", "gcd", "(", "m", ",", "n", ")", "==", "1", "or", "m", "==", "0", ":", "if", "m", "==", "0", ":", "n", "=", "1", "else", ":", "n", "=", "n_loop", "# construct the quadruple [m, U,V,W], count the number of odds in", "# quadruple to determine the parameter a, refer to the reference", "quadruple", "=", "[", "m", "]", "+", "[", "x", "*", "n", "for", "x", "in", "r_axis", "]", "odd_qua", "=", "len", "(", "list", "(", "filter", "(", "lambda", "x", ":", "x", "%", "2", "==", "1", ",", "quadruple", ")", ")", ")", "if", "odd_qua", "==", "4", ":", "a", "=", "4", "elif", "odd_qua", "==", "2", ":", "a", "=", "2", "else", ":", "a", "=", "1", "sigma", "=", "int", "(", "round", "(", "(", "m", "**", "2", "+", "n", "**", "2", "*", "sum", "(", "np", ".", "array", "(", "r_axis", ")", "**", "2", ")", ")", "/", "a", ")", ")", "if", "(", "sigma", "<=", "cutoff", ")", "and", "(", "sigma", ">", "1", ")", ":", "if", "sigma", "not", "in", "list", "(", "sigmas", ".", "keys", "(", ")", ")", ":", "if", "m", "==", "0", ":", "angle", "=", "180.0", "else", ":", "angle", "=", "2", "*", "np", ".", "arctan", "(", "n", "*", "np", ".", "sqrt", "(", "sum", "(", "np", ".", "array", "(", "r_axis", ")", "**", "2", ")", ")", "/", "m", ")", "/", "np", ".", "pi", "*", "180", "sigmas", "[", "sigma", "]", "=", "[", "angle", "]", "else", ":", "if", "m", "==", "0", ":", "angle", "=", "180.0", "else", ":", "angle", "=", "2", "*", "np", ".", "arctan", "(", "n", "*", "np", ".", "sqrt", "(", "sum", "(", "np", ".", "array", "(", "r_axis", ")", "**", "2", ")", ")", "/", "m", ")", "/", "np", ".", "pi", "*", "180", "if", "angle", "not", "in", "sigmas", "[", "sigma", "]", ":", "sigmas", "[", "sigma", "]", ".", "append", "(", "angle", ")", "return", "sigmas" ]
Find all possible sigma values and corresponding rotation angles within a sigma value cutoff with known rotation axis in cubic system. The algorithm for this code is from the reference Acta Cryst, A40, 108 (1984) Args: cutoff (integer): the cutoff of sigma values. r_axis (list of three integers, e.g. u, v, w): the rotation axis of the grain boundary, with the format of [u,v,w]. Returns: sigmas (dict): dictionary with keys as the possible integer sigma values and values as list of the possible rotation angles to the corresponding sigma values. e.g. the format as {sigma1: [angle11, angle12, ...], sigma2: [angle21, angle22, ...], ...} Note: the angles are the rotation angles of one grain with respect to the other grain. When generating the microstructures of the grain boundary using these angles, you need to analyze the symmetry of the structure. Different angles may result in equivalent microstructures.
[ "Find", "all", "possible", "sigma", "values", "and", "corresponding", "rotation", "angles", "within", "a", "sigma", "value", "cutoff", "with", "known", "rotation", "axis", "in", "cubic", "system", ".", "The", "algorithm", "for", "this", "code", "is", "from", "reference", "Acta", "Cryst", "A40", "108", "(", "1984", ")", "Args", ":", "cutoff", "(", "integer", ")", ":", "the", "cutoff", "of", "sigma", "values", ".", "r_axis", "(", "list", "of", "three", "integers", "e", ".", "g", ".", "u", "v", "w", ")", ":", "the", "rotation", "axis", "of", "the", "grain", "boundary", "with", "the", "format", "of", "[", "u", "v", "w", "]", ".", "Returns", ":", "sigmas", "(", "dict", ")", ":", "dictionary", "with", "keys", "as", "the", "possible", "integer", "sigma", "values", "and", "values", "as", "list", "of", "the", "possible", "rotation", "angles", "to", "the", "corresponding", "sigma", "values", ".", "e", ".", "g", ".", "the", "format", "as", "{", "sigma1", ":", "[", "angle11", "angle12", "...", "]", "sigma2", ":", "[", "angle21", "angle22", "...", "]", "...", "}", "Note", ":", "the", "angles", "are", "the", "rotation", "angles", "of", "one", "grain", "respect", "to", "the", "other", "grain", ".", "When", "generate", "the", "microstructures", "of", "the", "grain", "boundary", "using", "these", "angles", "you", "need", "to", "analyze", "the", "symmetry", "of", "the", "structure", ".", "Different", "angles", "may", "result", "in", "equivalent", "microstructures", "." ]
python
train
47.907895
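Usage sketch for ``enum_sigma_cubic`` with a cutoff of 50 and a [1, 1, 1] rotation axis; the sigma-3 entry at 60 degrees is the classic coherent-twin case.

.. code-block:: python

    sigmas = enum_sigma_cubic(cutoff=50, r_axis=[1, 1, 1])
    # Keys are sigma values, values are lists of angles in degrees,
    # e.g. sigmas[3] holds (approximately) 60.0 for the sigma-3 twin.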