Dataset schema (column name, type, and observed value range):

    repo              string (length 7 to 55)
    path              string (length 4 to 223)
    url               string (length 87 to 315)
    code              string (length 75 to 104k)
    code_tokens       list
    docstring         string (length 1 to 46.9k)
    docstring_tokens  list
    language          string (1 class)
    partition         string (3 classes)
    avg_line_len      float64 (7.91 to 980)
minhhoit/yacms
yacms/core/templatetags/yacms_tags.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/core/templatetags/yacms_tags.py#L121-L133
def sort_by(items, attr):
    """
    General sort filter - sorts by either attribute or key.
    """
    def key_func(item):
        try:
            return getattr(item, attr)
        except AttributeError:
            try:
                return item[attr]
            except TypeError:
                getattr(item, attr)  # Reraise AttributeError
    return sorted(items, key=key_func)
[ "def", "sort_by", "(", "items", ",", "attr", ")", ":", "def", "key_func", "(", "item", ")", ":", "try", ":", "return", "getattr", "(", "item", ",", "attr", ")", "except", "AttributeError", ":", "try", ":", "return", "item", "[", "attr", "]", "except", "TypeError", ":", "getattr", "(", "item", ",", "attr", ")", "# Reraise AttributeError", "return", "sorted", "(", "items", ",", "key", "=", "key_func", ")" ]
General sort filter - sorts by either attribute or key.
[ "General", "sort", "filter", "-", "sorts", "by", "either", "attribute", "or", "key", "." ]
python
train
29.076923
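A quick usage sketch of the sort_by filter above, as a minimal standalone copy (the original is a Django template filter); the sample records are invented for illustration. It sorts by attribute when present, otherwise by key, and re-raises AttributeError for items that support neither.

# Minimal standalone sketch of the sort_by filter above; the sample
# records are hypothetical, for illustration only.
from collections import namedtuple

def sort_by(items, attr):
    def key_func(item):
        try:
            return getattr(item, attr)          # attribute access first
        except AttributeError:
            try:
                return item[attr]               # fall back to key access
            except TypeError:
                getattr(item, attr)             # re-raise AttributeError
    return sorted(items, key=key_func)

Post = namedtuple("Post", "title rank")
by_key = sort_by([{"rank": 2}, {"rank": 1}], "rank")           # dicts: key lookup
by_attr = sort_by([Post("b", 2), Post("a", 1)], "rank")        # objects: attribute lookup
print([p["rank"] for p in by_key], [p.rank for p in by_attr])  # [1, 2] [1, 2]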
apple/turicreate
src/unity/python/turicreate/util/__init__.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/__init__.py#L394-L405
def _make_temp_filename(prefix):
    '''
    Generate a temporary file that would not live beyond the lifetime of
    unity_server.

    Caller is expected to clean up the temp file as soon as the file is no
    longer needed. But temp files created using this method will be cleaned
    up when unity_server restarts.
    '''
    temp_location = _get_temp_file_location()
    temp_file_name = '/'.join([temp_location, str(prefix) + str(_uuid.uuid4())])
    return temp_file_name
[ "def", "_make_temp_filename", "(", "prefix", ")", ":", "temp_location", "=", "_get_temp_file_location", "(", ")", "temp_file_name", "=", "'/'", ".", "join", "(", "[", "temp_location", ",", "str", "(", "prefix", ")", "+", "str", "(", "_uuid", ".", "uuid4", "(", ")", ")", "]", ")", "return", "temp_file_name" ]
Generate a temporary file that would not live beyond the lifetime of unity_server. Caller is expected to clean up the temp file as soon as the file is no longer needed. But temp files created using this method will be cleaned up when unity_server restarts
[ "Generate", "a", "temporary", "file", "that", "would", "not", "live", "beyond", "the", "lifetime", "of", "unity_server", "." ]
python
train
38.75
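The helper above depends on unity_server internals (_get_temp_file_location, _uuid). A portable sketch of the same naming pattern using only the standard library might look like this; tempfile.gettempdir() is an assumed stand-in for the Turi Create-managed location.

# Standard-library sketch of the same naming pattern; tempfile.gettempdir()
# is an assumed stand-in for the unity_server-managed temp location.
import os
import tempfile
import uuid

def make_temp_filename(prefix):
    # os.path.join is used instead of '/'.join so the path is portable
    return os.path.join(tempfile.gettempdir(), str(prefix) + str(uuid.uuid4()))

print(make_temp_filename("model_"))  # e.g. /tmp/model_<random-uuid>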
django-parler/django-parler
parler/views.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/views.py#L243-L253
def get_language_tabs(self):
    """
    Determine the language tabs to show.
    """
    current_language = self.get_current_language()
    if self.object:
        available_languages = list(self.object.get_available_languages())
    else:
        available_languages = []
    return get_language_tabs(self.request, current_language, available_languages)
[ "def", "get_language_tabs", "(", "self", ")", ":", "current_language", "=", "self", ".", "get_current_language", "(", ")", "if", "self", ".", "object", ":", "available_languages", "=", "list", "(", "self", ".", "object", ".", "get_available_languages", "(", ")", ")", "else", ":", "available_languages", "=", "[", "]", "return", "get_language_tabs", "(", "self", ".", "request", ",", "current_language", ",", "available_languages", ")" ]
Determine the language tabs to show.
[ "Determine", "the", "language", "tabs", "to", "show", "." ]
python
train
34.727273
zhmcclient/python-zhmcclient
zhmcclient/_user_role.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_user_role.py#L148-L186
def create(self, properties):
    """
    Create a new (user-defined) User Role in this HMC.

    Authorization requirements:

    * Task permission to the "Manage User Roles" task.

    Parameters:

      properties (dict): Initial property values.
        Allowable properties are defined in section 'Request body contents'
        in section 'Create User Role' in the :term:`HMC API` book.

    Returns:

      UserRole: The resource object for the new User Role.
        The object will have its 'object-uri' property set as returned by
        the HMC, and will also have the input properties set.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    result = self.session.post(self.console.uri + '/user-roles',
                               body=properties)
    # There should not be overlaps, but just in case there are, the
    # returned props should overwrite the input props:
    props = copy.deepcopy(properties)
    props.update(result)
    name = props.get(self._name_prop, None)
    uri = props[self._uri_prop]
    user_role = UserRole(self, uri, name, props)
    self._name_uri_cache.update(name, uri)
    return user_role
[ "def", "create", "(", "self", ",", "properties", ")", ":", "result", "=", "self", ".", "session", ".", "post", "(", "self", ".", "console", ".", "uri", "+", "'/user-roles'", ",", "body", "=", "properties", ")", "# There should not be overlaps, but just in case there are, the", "# returned props should overwrite the input props:", "props", "=", "copy", ".", "deepcopy", "(", "properties", ")", "props", ".", "update", "(", "result", ")", "name", "=", "props", ".", "get", "(", "self", ".", "_name_prop", ",", "None", ")", "uri", "=", "props", "[", "self", ".", "_uri_prop", "]", "user_role", "=", "UserRole", "(", "self", ",", "uri", ",", "name", ",", "props", ")", "self", ".", "_name_uri_cache", ".", "update", "(", "name", ",", "uri", ")", "return", "user_role" ]
Create a new (user-defined) User Role in this HMC. Authorization requirements: * Task permission to the "Manage User Roles" task. Parameters: properties (dict): Initial property values. Allowable properties are defined in section 'Request body contents' in section 'Create User Role' in the :term:`HMC API` book. Returns: UserRole: The resource object for the new User Role. The object will have its 'object-uri' property set as returned by the HMC, and will also have the input properties set. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
[ "Create", "a", "new", "(", "user", "-", "defined", ")", "User", "Role", "in", "this", "HMC", "." ]
python
train
34.589744
tensorflow/tensor2tensor
tensor2tensor/data_generators/wikitext103.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikitext103.py#L62-L104
def _maybe_download_corpus(tmp_dir, vocab_type):
    """Download and unpack the corpus.

    Args:
      tmp_dir: directory containing dataset.
      vocab_type: which vocabulary are we using.

    Returns:
      The list of names of files.
    """
    if vocab_type == text_problems.VocabType.CHARACTER:
        dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext"
                       "/wikitext-103-raw-v1.zip")
        dir_name = "wikitext-103-raw"
    else:
        dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext"
                       "/wikitext-103-v1.zip")
        dir_name = "wikitext-103"

    fname = os.path.basename(dataset_url)
    compressed_filepath = generator_utils.maybe_download(tmp_dir, fname,
                                                         dataset_url)
    zip_ref = zipfile.ZipFile(compressed_filepath, "r")
    zip_ref.extractall(tmp_dir)
    zip_ref.close()

    files = os.path.join(tmp_dir, dir_name, "*")
    train_file, valid_file, test_file = None, None, None
    for f in tf.gfile.Glob(files):
        fname = os.path.basename(f)
        if "train" in fname:
            train_file = f
        elif "valid" in fname:
            valid_file = f
        elif "test" in fname:
            test_file = f

    assert train_file, "Training file not found"
    assert valid_file, "Validation file not found"
    assert test_file, "Testing file not found"
    return train_file, valid_file, test_file
[ "def", "_maybe_download_corpus", "(", "tmp_dir", ",", "vocab_type", ")", ":", "if", "vocab_type", "==", "text_problems", ".", "VocabType", ".", "CHARACTER", ":", "dataset_url", "=", "(", "\"https://s3.amazonaws.com/research.metamind.io/wikitext\"", "\"/wikitext-103-raw-v1.zip\"", ")", "dir_name", "=", "\"wikitext-103-raw\"", "else", ":", "dataset_url", "=", "(", "\"https://s3.amazonaws.com/research.metamind.io/wikitext\"", "\"/wikitext-103-v1.zip\"", ")", "dir_name", "=", "\"wikitext-103\"", "fname", "=", "os", ".", "path", ".", "basename", "(", "dataset_url", ")", "compressed_filepath", "=", "generator_utils", ".", "maybe_download", "(", "tmp_dir", ",", "fname", ",", "dataset_url", ")", "zip_ref", "=", "zipfile", ".", "ZipFile", "(", "compressed_filepath", ",", "\"r\"", ")", "zip_ref", ".", "extractall", "(", "tmp_dir", ")", "zip_ref", ".", "close", "(", ")", "files", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "dir_name", ",", "\"*\"", ")", "train_file", ",", "valid_file", ",", "test_file", "=", "None", ",", "None", ",", "None", "for", "f", "in", "tf", ".", "gfile", ".", "Glob", "(", "files", ")", ":", "fname", "=", "os", ".", "path", ".", "basename", "(", "f", ")", "if", "\"train\"", "in", "fname", ":", "train_file", "=", "f", "elif", "\"valid\"", "in", "fname", ":", "valid_file", "=", "f", "elif", "\"test\"", "in", "fname", ":", "test_file", "=", "f", "assert", "train_file", ",", "\"Training file not found\"", "assert", "valid_file", ",", "\"Validation file not found\"", "assert", "test_file", ",", "\"Testing file not found\"", "return", "train_file", ",", "valid_file", ",", "test_file" ]
Download and unpack the corpus. Args: tmp_dir: directory containing dataset. vocab_type: which vocabulary are we using. Returns: The list of names of files.
[ "Download", "and", "unpack", "the", "corpus", "." ]
python
train
31.093023
ets-labs/python-dependency-injector
examples/providers/dependency.py
https://github.com/ets-labs/python-dependency-injector/blob/d04fe41eb17f667da38b97525e2d16c8f2d272fe/examples/providers/dependency.py#L34-L38
def create(self, name):
    """Create user with provided name and return his id."""
    with contextlib.closing(self.database.cursor()) as cursor:
        cursor.execute('INSERT INTO users(name) VALUES (?)', (name,))
        return cursor.lastrowid
[ "def", "create", "(", "self", ",", "name", ")", ":", "with", "contextlib", ".", "closing", "(", "self", ".", "database", ".", "cursor", "(", ")", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "'INSERT INTO users(name) VALUES (?)'", ",", "(", "name", ",", ")", ")", "return", "cursor", ".", "lastrowid" ]
Create user with provided name and return his id.
[ "Create", "user", "with", "provided", "name", "and", "return", "his", "id", "." ]
python
train
52
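Because the method above only needs a DB-API connection exposing cursor() and lastrowid, it can be exercised end to end against an in-memory SQLite database. The UserService wrapper below is a hypothetical harness for the snippet, not part of the original example.

# Hypothetical harness around the create() method above, run against an
# in-memory SQLite database (any DB-API connection with cursors works).
import contextlib
import sqlite3

class UserService:
    def __init__(self, database):
        self.database = database

    def create(self, name):
        """Create user with provided name and return his id."""
        with contextlib.closing(self.database.cursor()) as cursor:
            cursor.execute('INSERT INTO users(name) VALUES (?)', (name,))
            return cursor.lastrowid

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)')
service = UserService(db)
print(service.create('alice'))  # 1 -- the autogenerated row id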
sorgerlab/indra
indra/literature/pubmed_client.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/pubmed_client.py#L106-L127
def get_id_count(search_term):
    """Get the number of citations in Pubmed for a search query.

    Parameters
    ----------
    search_term : str
        A term for which the PubMed search should be performed.

    Returns
    -------
    int or None
        The number of citations for the query, or None if the query fails.
    """
    params = {'term': search_term,
              'rettype': 'count',
              'db': 'pubmed'}
    tree = send_request(pubmed_search, params)
    if tree is None:
        return None
    else:
        count = tree.getchildren()[0].text
        return int(count)
[ "def", "get_id_count", "(", "search_term", ")", ":", "params", "=", "{", "'term'", ":", "search_term", ",", "'rettype'", ":", "'count'", ",", "'db'", ":", "'pubmed'", "}", "tree", "=", "send_request", "(", "pubmed_search", ",", "params", ")", "if", "tree", "is", "None", ":", "return", "None", "else", ":", "count", "=", "tree", ".", "getchildren", "(", ")", "[", "0", "]", ".", "text", "return", "int", "(", "count", ")" ]
Get the number of citations in Pubmed for a search query. Parameters ---------- search_term : str A term for which the PubMed search should be performed. Returns ------- int or None The number of citations for the query, or None if the query fails.
[ "Get", "the", "number", "of", "citations", "in", "Pubmed", "for", "a", "search", "query", "." ]
python
train
26.409091
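get_id_count above delegates to indra's send_request helper and a module-level pubmed_search URL. A self-contained equivalent can call the NCBI E-utilities esearch endpoint directly with rettype=count; the URL and parameter names below follow the public E-utilities documentation, not indra's internals.

# Self-contained sketch hitting NCBI E-utilities directly; the endpoint and
# parameters follow the public esearch API, not indra's send_request().
import requests
import xml.etree.ElementTree as ET

ESEARCH = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'

def get_id_count(search_term):
    params = {'db': 'pubmed', 'term': search_term, 'rettype': 'count'}
    resp = requests.get(ESEARCH, params=params)
    if resp.status_code != 200:
        return None
    tree = ET.fromstring(resp.content)
    count = tree.find('Count')  # esearch wraps the count in <eSearchResult><Count>
    return int(count.text) if count is not None else None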
jic-dtool/dtoolcore
dtoolcore/storagebroker.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L245-L250
def put_manifest(self, manifest):
    """Store the manifest."""
    logger.debug("Putting manifest")
    text = json.dumps(manifest, indent=2, sort_keys=True)
    key = self.get_manifest_key()
    self.put_text(key, text)
[ "def", "put_manifest", "(", "self", ",", "manifest", ")", ":", "logger", ".", "debug", "(", "\"Putting manifest\"", ")", "text", "=", "json", ".", "dumps", "(", "manifest", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")", "key", "=", "self", ".", "get_manifest_key", "(", ")", "self", ".", "put_text", "(", "key", ",", "text", ")" ]
Store the manifest.
[ "Store", "the", "manifest", "." ]
python
train
39.333333
0compute/xtraceback
xtraceback/xtraceback.py
https://github.com/0compute/xtraceback/blob/5f4ae11cf21e6eea830d79aed66d3cd91bd013cd/xtraceback/xtraceback.py#L115-L120
def color(self):
    """
    Whether or not color should be output
    """
    return self.tty_stream if self.options.color is None \
        else self.options.color
[ "def", "color", "(", "self", ")", ":", "return", "self", ".", "tty_stream", "if", "self", ".", "options", ".", "color", "is", "None", "else", "self", ".", "options", ".", "color" ]
Whether or not color should be output
[ "Whether", "or", "not", "color", "should", "be", "output" ]
python
train
30
codenerix/django-codenerix
codenerix/views.py
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/views.py#L3532-L3668
def get_filled_structure(self, subgroup=None):
    '''
    Method in charge of filling a structure containing the object field
    values, taking into account the 'group' attribute from the
    corresponding form object, which is necessary to fill the details
    form as it is configured in the 'group' attribute.
    '''
    # initialize the result structure
    result = []

    # the object's corresponding model content is taken into a dictionary
    object_content = model_to_dict(self.object)

    # generally some common or specific fields are not interesting
    if 'exclude_fields' not in dir(self):
        self.exclude_fields = []
    self.exclude_fields.append("id")
    for field in (self.exclude_fields):
        if field in object_content.keys():
            object_content.pop(field)

    # a structure with the appropriate caption is built for every
    # existing field in the current model
    verbose_names = {}
    for field in object_content.keys():
        verbose_names[field] = self.model._meta.get_field(field).verbose_name

    # the fields found in the groups structure are taken into account
    gr_object_content = []
    if subgroup:
        group_array = subgroup
    else:
        group_array = self.groups

    for group in group_array:
        # raise Exception(group)
        item = {}
        item["name"] = smart_text(group[0])
        item["col"] = group[1]
        item_elements = group[2:]
        sublist = []
        idx = 0
        for item_element in item_elements:
            # the element can contain other groups
            if (idx > 1) and (type(item_element) == tuple):
                # Recursive
                sublist.append(self.get_filled_structure([subgroup]))
            else:
                filter_field = None
                # Check if it is a list
                if type(item_element) == list:
                    # if it is a list, the corresponding values for
                    # columns and any other field can be found in it
                    field = item_element[0]
                    # take into account that the field caption can be
                    # passed as the third list element
                    if len(item_element) >= 3 and item_element[2]:
                        verbose_names[field] = _(item_element[2])
                    if len(item_element) >= 9:
                        filter_field = item_element[8]
                else:
                    field = item_element

                if field not in verbose_names:
                    if field.startswith('get_') and field.endswith('_display'):
                        label_field = remove_getdisplay(field)
                        if self.model:
                            try:
                                verbose_names[field] = self.model._meta.get_field(label_field).verbose_name
                            except FieldDoesNotExist:
                                verbose_names[field] = _(label_field)
                        else:
                            verbose_names[field] = _(label_field)
                    else:
                        label_field = field
                        verbose_names[field] = _(label_field)

                args = {}
                value = None
                for field_split in field.split('__'):
                    if value is None:
                        try:
                            verbose_names[field] = self.object._meta.get_field(field_split).verbose_name
                        except AttributeError:
                            pass
                        except FieldDoesNotExist:
                            pass
                        value = getattr(self.object, field_split, None)
                    else:
                        try:
                            verbose_names[field] = value._meta.get_field(field_split).verbose_name
                        except AttributeError:
                            pass
                        except FieldDoesNotExist:
                            pass
                        value = getattr(value, field_split, None)

                if callable(value):
                    # if 'request' in value.func_code.co_varnames:
                    related = (getattr(value, 'all', None) is not None)
                    if related:
                        value = ", ".join([str(x) for x in value.all()])
                    else:
                        if 'request' in value.__code__.co_varnames:
                            args['request'] = self.request
                        # Call the method
                        value = value(**args)

                sublist.append({
                    "name": _(verbose_names[field]),
                    "value": value,
                    "filter": filter_field,
                })
                gr_object_content.append(field)

            # Increment index
            idx += 1

        item["value"] = sublist
        result.append(item)

    for field in object_content.keys():
        item = {}
        if field not in gr_object_content:
            item["name"] = _(verbose_names[field])
            item["value"] = getattr(self.object, field)
            result.append(item)

    return result
[ "def", "get_filled_structure", "(", "self", ",", "subgroup", "=", "None", ")", ":", "# initilize the result structure", "result", "=", "[", "]", "# the object corresponding model content is taken into a dictionary", "object_content", "=", "model_to_dict", "(", "self", ".", "object", ")", "# generallically some common or specific fields are not interesting", "if", "'exclude_fields'", "not", "in", "dir", "(", "self", ")", ":", "self", ".", "exclude_fields", "=", "[", "]", "self", ".", "exclude_fields", ".", "append", "(", "\"id\"", ")", "for", "field", "in", "(", "self", ".", "exclude_fields", ")", ":", "if", "field", "in", "object_content", ".", "keys", "(", ")", ":", "object_content", ".", "pop", "(", "field", ")", "# following is going to be created an structure with the appropieate caption", "# for every existing field in the current model", "verbose_names", "=", "{", "}", "for", "field", "in", "object_content", ".", "keys", "(", ")", ":", "verbose_names", "[", "field", "]", "=", "self", ".", "model", ".", "_meta", ".", "get_field", "(", "field", ")", ".", "verbose_name", "# the found fields in the groups structure are going to be taked into account", "gr_object_content", "=", "[", "]", "if", "subgroup", ":", "group_array", "=", "subgroup", "else", ":", "group_array", "=", "self", ".", "groups", "for", "group", "in", "group_array", ":", "# raise Exception(group)", "item", "=", "{", "}", "item", "[", "\"name\"", "]", "=", "smart_text", "(", "group", "[", "0", "]", ")", "item", "[", "\"col\"", "]", "=", "group", "[", "1", "]", "item_elements", "=", "group", "[", "2", ":", "]", "sublist", "=", "[", "]", "idx", "=", "0", "for", "item_element", "in", "item_elements", ":", "# the element can contains another groups", "if", "(", "idx", ">", "1", ")", "and", "(", "type", "(", "item_element", ")", "==", "tuple", ")", ":", "# Recursive", "sublist", ".", "append", "(", "self", ".", "get_filled_structure", "(", "[", "subgroup", "]", ")", ")", "else", ":", "filter_field", "=", "None", "# Check if it is a list", "if", "type", "(", "item_element", ")", "==", "list", ":", "# if it is a list, that means that can be found the", "# corresponding values for colums and any other", "field", "=", "item_element", "[", "0", "]", "# take into account that field caption can be passed as", "# third list element", "if", "len", "(", "item_element", ")", ">=", "3", "and", "item_element", "[", "2", "]", ":", "verbose_names", "[", "field", "]", "=", "_", "(", "item_element", "[", "2", "]", ")", "if", "len", "(", "item_element", ")", ">=", "9", ":", "filter_field", "=", "item_element", "[", "8", "]", "else", ":", "field", "=", "item_element", "if", "field", "not", "in", "verbose_names", ":", "if", "field", ".", "startswith", "(", "'get_'", ")", "and", "field", ".", "endswith", "(", "'_display'", ")", ":", "label_field", "=", "remove_getdisplay", "(", "field", ")", "if", "self", ".", "model", ":", "try", ":", "verbose_names", "[", "field", "]", "=", "self", ".", "model", ".", "_meta", ".", "get_field", "(", "label_field", ")", ".", "verbose_name", "except", "FieldDoesNotExist", ":", "verbose_names", "[", "field", "]", "=", "_", "(", "label_field", ")", "else", ":", "verbose_names", "[", "field", "]", "=", "_", "(", "label_field", ")", "else", ":", "label_field", "=", "field", "verbose_names", "[", "field", "]", "=", "_", "(", "label_field", ")", "args", "=", "{", "}", "value", "=", "None", "for", "field_split", "in", "field", ".", "split", "(", "'__'", ")", ":", "if", "value", "is", "None", ":", "try", ":", 
"verbose_names", "[", "field", "]", "=", "self", ".", "object", ".", "_meta", ".", "get_field", "(", "field_split", ")", ".", "verbose_name", "except", "AttributeError", ":", "pass", "except", "FieldDoesNotExist", ":", "pass", "value", "=", "getattr", "(", "self", ".", "object", ",", "field_split", ",", "None", ")", "else", ":", "try", ":", "verbose_names", "[", "field", "]", "=", "value", ".", "_meta", ".", "get_field", "(", "field_split", ")", ".", "verbose_name", "except", "AttributeError", ":", "pass", "except", "FieldDoesNotExist", ":", "pass", "value", "=", "getattr", "(", "value", ",", "field_split", ",", "None", ")", "if", "callable", "(", "value", ")", ":", "# if 'request' in value.func_code.co_varnames:", "related", "=", "(", "getattr", "(", "value", ",", "'all'", ",", "None", ")", "is", "not", "None", ")", "if", "related", ":", "value", "=", "\", \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "value", ".", "all", "(", ")", "]", ")", "else", ":", "if", "'request'", "in", "value", ".", "__code__", ".", "co_varnames", ":", "args", "[", "'request'", "]", "=", "self", ".", "request", "# Call the method", "value", "=", "value", "(", "*", "*", "args", ")", "sublist", ".", "append", "(", "{", "\"name\"", ":", "_", "(", "verbose_names", "[", "field", "]", ")", ",", "\"value\"", ":", "value", ",", "\"filter\"", ":", "filter_field", ",", "}", ")", "gr_object_content", ".", "append", "(", "field", ")", "# Increment index", "idx", "+=", "1", "item", "[", "\"value\"", "]", "=", "sublist", "result", ".", "append", "(", "item", ")", "for", "field", "in", "object_content", ".", "keys", "(", ")", ":", "item", "=", "{", "}", "if", "field", "not", "in", "gr_object_content", ":", "item", "[", "\"name\"", "]", "=", "_", "(", "verbose_names", "[", "field", "]", ")", "item", "[", "\"value\"", "]", "=", "getattr", "(", "self", ".", "object", ",", "field", ")", "result", ".", "append", "(", "item", ")", "return", "result" ]
Method in charge of filling a structure containing the object field values, taking into account the 'group' attribute from the corresponding form object, which is necessary to fill the details form as it is configured in the 'group' attribute.
[ "method", "in", "charged", "of", "filling", "an", "structure", "containing", "the", "object", "fields", "values", "taking", "into", "account", "the", "group", "attribute", "from", "the", "corresponding", "form", "object", "which", "is", "necesary", "to", "fill", "the", "details", "form", "as", "it", "is", "configured", "in", "the", "group", "attribute" ]
python
train
40.80292
IntegralDefense/vxstreamlib
bin/vxstreamlib.py
https://github.com/IntegralDefense/vxstreamlib/blob/cd82e3975215085cf929c5976f37083b9a3ac869/bin/vxstreamlib.py#L185-L189
def wait(self):
    """Waits for all submitted jobs to complete."""
    logging.info("waiting for {} jobs to complete".format(len(self.submissions)))
    while not self.shutdown:
        time.sleep(1)
[ "def", "wait", "(", "self", ")", ":", "logging", ".", "info", "(", "\"waiting for {} jobs to complete\"", ".", "format", "(", "len", "(", "self", ".", "submissions", ")", ")", ")", "while", "not", "self", ".", "shutdown", ":", "time", ".", "sleep", "(", "1", ")" ]
Waits for all submitted jobs to complete.
[ "Waits", "for", "all", "submitted", "jobs", "to", "complete", "." ]
python
train
42.4
xu2243051/easyui-menu
easyui/utils.py
https://github.com/xu2243051/easyui-menu/blob/4da0b50cf2d3ddb0f1ec7a4da65fd3c4339f8dfb/easyui/utils.py#L21-L48
def register_views(app_name, view_filename, urlpatterns=None):
    """
    app_name        the app name
    view_filename   the file in which the views live
    urlpatterns     the urlpatterns already present in urls
    return urlpatterns

    Only imports views whose names end with 'View', i.e. class-based views.
    """
    app_module = __import__(app_name)
    view_module = getattr(app_module, view_filename)
    views = dir(view_module)

    for view_name in views:
        if view_name.endswith('View'):
            view = getattr(view_module, view_name)
            if isinstance(view, object):
                if urlpatterns:
                    urlpatterns += patterns('',
                        url(r'^(?i)%s/$' % view_name, view.as_view(), name=view_name),
                    )
                else:
                    urlpatterns = patterns('',
                        url(r'^(?i)%s/$' % view_name, view.as_view(), name=view_name),
                    )
            else:
                pass
    return urlpatterns
[ "def", "register_views", "(", "app_name", ",", "view_filename", ",", "urlpatterns", "=", "None", ")", ":", "app_module", "=", "__import__", "(", "app_name", ")", "view_module", "=", "getattr", "(", "app_module", ",", "view_filename", ")", "views", "=", "dir", "(", "view_module", ")", "for", "view_name", "in", "views", ":", "if", "view_name", ".", "endswith", "(", "'View'", ")", ":", "view", "=", "getattr", "(", "view_module", ",", "view_name", ")", "if", "isinstance", "(", "view", ",", "object", ")", ":", "if", "urlpatterns", ":", "urlpatterns", "+=", "patterns", "(", "''", ",", "url", "(", "r'^(?i)%s/$'", "%", "view_name", ",", "view", ".", "as_view", "(", ")", ",", "name", "=", "view_name", ")", ",", ")", "else", ":", "urlpatterns", "=", "patterns", "(", "''", ",", "url", "(", "r'^(?i)%s/$'", "%", "view_name", ",", "view", ".", "as_view", "(", ")", ",", "name", "=", "view_name", ")", ",", ")", "else", ":", "pass", "return", "urlpatterns" ]
app_name the app name; view_filename the file in which the views live; urlpatterns the urlpatterns already present in urls; return urlpatterns. Only imports views whose names end with 'View', i.e. class-based views.
[ "app_name", "APP名", "view_filename", "views", "所在的文件", "urlpatterns", "url中已经存在的urlpatterns" ]
python
valid
33.25
5j9/wikitextparser
wikitextparser/_parser_function.py
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_parser_function.py#L66-L71
def name(self) -> str:
    """Return template's name (includes whitespace)."""
    h = self._atomic_partition(self._first_arg_sep)[0]
    if len(h) == len(self.string):
        return h[2:-2]
    return h[2:]
[ "def", "name", "(", "self", ")", "->", "str", ":", "h", "=", "self", ".", "_atomic_partition", "(", "self", ".", "_first_arg_sep", ")", "[", "0", "]", "if", "len", "(", "h", ")", "==", "len", "(", "self", ".", "string", ")", ":", "return", "h", "[", "2", ":", "-", "2", "]", "return", "h", "[", "2", ":", "]" ]
Return template's name (includes whitespace).
[ "Return", "template", "s", "name", "(", "includes", "whitespace", ")", "." ]
python
test
37.166667
samirelanduk/quickplots
quickplots/charts.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/charts.py#L427-L458
def x_upper_limit(self, limit=None):
    """Returns or sets (if a value is provided) the value at which the
    x-axis should end. By default this is the highest x value in the
    associated series.

    :param limit: If given, the chart's x_upper_limit will be set to this.
    :raises ValueError: if you try to make the upper limit smaller than the\
    lower limit."""

    if limit is None:
        if self._x_upper_limit is None:
            if self.smallest_x() == self.largest_x():
                if int(self.largest_x()) == float(self.largest_x()):
                    return self.largest_x() + 1
                else:
                    return math.ceil(self.largest_x())
            else:
                return self.largest_x()
        else:
            return self._x_upper_limit
    else:
        if not is_numeric(limit):
            raise TypeError(
                "upper x limit must be numeric, not '%s'" % str(limit)
            )
        if limit <= self.smallest_x():
            raise ValueError(
                "upper x limit must be greater than lower limit (%s), not %s" % (
                    str(self.smallest_x()), str(limit)
                )
            )
        self._x_upper_limit = limit
[ "def", "x_upper_limit", "(", "self", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "None", ":", "if", "self", ".", "_x_upper_limit", "is", "None", ":", "if", "self", ".", "smallest_x", "(", ")", "==", "self", ".", "largest_x", "(", ")", ":", "if", "int", "(", "self", ".", "largest_x", "(", ")", ")", "==", "float", "(", "self", ".", "largest_x", "(", ")", ")", ":", "return", "self", ".", "largest_x", "(", ")", "+", "1", "else", ":", "return", "math", ".", "ceil", "(", "self", ".", "largest_x", "(", ")", ")", "else", ":", "return", "self", ".", "largest_x", "(", ")", "else", ":", "return", "self", ".", "_x_upper_limit", "else", ":", "if", "not", "is_numeric", "(", "limit", ")", ":", "raise", "TypeError", "(", "\"upper x limit must be numeric, not '%s'\"", "%", "str", "(", "limit", ")", ")", "if", "limit", "<=", "self", ".", "smallest_x", "(", ")", ":", "raise", "ValueError", "(", "\"upper x limit must be greater than lower limit (%s), not %s\"", "%", "(", "str", "(", "self", ".", "smallest_x", "(", ")", ")", ",", "str", "(", "limit", ")", ")", ")", "self", ".", "_x_upper_limit", "=", "limit" ]
Returns or sets (if a value is provided) the value at which the x-axis should end. By default this is the highest x value in the associated series. :param limit: If given, the chart's x_upper_limit will be set to this. :raises ValueError: if you try to make the upper limit smaller than the\ lower limit.
[ "Returns", "or", "sets", "(", "if", "a", "value", "is", "provided", ")", "the", "value", "at", "which", "the", "x", "-", "axis", "should", "end", ".", "By", "default", "this", "is", "the", "highest", "x", "value", "in", "the", "associated", "series", "." ]
python
train
40.5
p3trus/slave
slave/quantum_design/ppms.py
https://github.com/p3trus/slave/blob/bdc74e73bd0f47b74a090c43aa2283c469cde3be/slave/quantum_design/ppms.py#L444-L452
def beep(self, duration, frequency):
    """Generates a beep.

    :param duration: The duration in seconds, in the range 0.1 to 5.
    :param frequency: The frequency in Hz, in the range 500 to 5000.
    """
    cmd = 'BEEP', [Float(min=0.1, max=5.0), Integer(min=500, max=5000)]
    self._write(cmd, duration, frequency)
[ "def", "beep", "(", "self", ",", "duration", ",", "frequency", ")", ":", "cmd", "=", "'BEEP'", ",", "[", "Float", "(", "min", "=", "0.1", ",", "max", "=", "5.0", ")", ",", "Integer", "(", "min", "=", "500", ",", "max", "=", "5000", ")", "]", "self", ".", "_write", "(", "cmd", ",", "duration", ",", "frequency", ")" ]
Generates a beep. :param duration: The duration in seconds, in the range 0.1 to 5. :param frequency: The frequency in Hz, in the range 500 to 5000.
[ "Generates", "a", "beep", "." ]
python
train
37.666667
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L9493-L9529
def power_on_vm(name, datacenter=None, service_instance=None):
    '''
    Powers on a virtual machine specified by its name.

    name
        Name of the virtual machine

    datacenter
        Datacenter of the virtual machine

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.power_on_vm name=my_vm
    '''
    log.trace('Powering on virtual machine %s', name)
    vm_properties = [
        'name',
        'summary.runtime.powerState'
    ]
    virtual_machine = salt.utils.vmware.get_vm_by_property(
        service_instance,
        name,
        datacenter=datacenter,
        vm_properties=vm_properties)
    if virtual_machine['summary.runtime.powerState'] == 'poweredOn':
        result = {'comment': 'Virtual machine is already powered on',
                  'changes': {'power_on': True}}
        return result
    salt.utils.vmware.power_cycle_vm(virtual_machine['object'], action='on')
    result = {'comment': 'Virtual machine power on action succeeded',
              'changes': {'power_on': True}}
    return result
[ "def", "power_on_vm", "(", "name", ",", "datacenter", "=", "None", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Powering on virtual machine %s'", ",", "name", ")", "vm_properties", "=", "[", "'name'", ",", "'summary.runtime.powerState'", "]", "virtual_machine", "=", "salt", ".", "utils", ".", "vmware", ".", "get_vm_by_property", "(", "service_instance", ",", "name", ",", "datacenter", "=", "datacenter", ",", "vm_properties", "=", "vm_properties", ")", "if", "virtual_machine", "[", "'summary.runtime.powerState'", "]", "==", "'poweredOn'", ":", "result", "=", "{", "'comment'", ":", "'Virtual machine is already powered on'", ",", "'changes'", ":", "{", "'power_on'", ":", "True", "}", "}", "return", "result", "salt", ".", "utils", ".", "vmware", ".", "power_cycle_vm", "(", "virtual_machine", "[", "'object'", "]", ",", "action", "=", "'on'", ")", "result", "=", "{", "'comment'", ":", "'Virtual machine power on action succeeded'", ",", "'changes'", ":", "{", "'power_on'", ":", "True", "}", "}", "return", "result" ]
Powers on a virtual machine specified by its name. name Name of the virtual machine datacenter Datacenter of the virtual machine service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.power_on_vm name=my_vm
[ "Powers", "on", "a", "virtual", "machine", "specified", "by", "it", "s", "name", "." ]
python
train
30.027027
i3visio/deepify
deepify/tor.py
https://github.com/i3visio/deepify/blob/2af04e0bea3eaabe96b0565e10f7eeb29b042a2b/deepify/tor.py#L53-L93
def _grabContentFromUrl(self, url):
    """
    Function that abstracts capturing a URL. This method rewrites the one
    from Wrapper.

    :param url: The URL to be processed.
    :return: The response in JSON format.
    """
    # Defining an empty object for the response
    response = {}

    # This part has to be modified...
    try:
        # Configuring the socket
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, self.info["host"],
                              int(self.info["port"]), True)
        s = socks.socksocket()

        # Extracting the domain from the URL
        domain = self.getDomainFromUrl(url)
        s.connect((domain, 80))
        message = 'GET ' + url + ' HTTP/1.0\r\n\r\n'
        s.sendall(message)

        data = ""
        while True:
            reply = s.recv(4096)
            if not reply:
                break
            else:
                data += reply

        # Processing data as expected
        response = self._createDataStructure(data)
    # Try to make the errors clear for other users
    # (the original used the Python 2-only "except X, e" syntax; "as" is
    # equivalent and also valid in Python 2.6+)
    except socks.ProxyConnectionError as sPCE:
        errMsg = "ERROR socks.ProxyConnectionError. Something seems to be wrong with the Tor Bundler."
        raise Exception(errMsg + " " + str(sPCE))

    return response
[ "def", "_grabContentFromUrl", "(", "self", ",", "url", ")", ":", "# Defining an empty object for the response", "response", "=", "{", "}", "# This part has to be modified... ", "try", ":", "# Configuring the socket", "socks", ".", "setdefaultproxy", "(", "socks", ".", "PROXY_TYPE_SOCKS5", ",", "self", ".", "info", "[", "\"host\"", "]", ",", "int", "(", "self", ".", "info", "[", "\"port\"", "]", ")", ",", "True", ")", "s", "=", "socks", ".", "socksocket", "(", ")", "# Extracting the domain from the URL", "domain", "=", "self", ".", "getDomainFromUrl", "(", "url", ")", "s", ".", "connect", "(", "(", "domain", ",", "80", ")", ")", "message", "=", "'GET '", "+", "url", "+", "' HTTP/1.0\\r\\n\\r\\n'", "s", ".", "sendall", "(", "message", ")", "data", "=", "\"\"", "while", "True", ":", "reply", "=", "s", ".", "recv", "(", "4096", ")", "if", "not", "reply", ":", "break", "else", ":", "data", "+=", "reply", "# Processing data as expected", "response", "=", "self", ".", "_createDataStructure", "(", "data", ")", "# Try to make the errors clear for other users", "except", "socks", ".", "ProxyConnectionError", ",", "sPCE", ":", "errMsg", "=", "\"ERROR socks.ProxyConnectionError. Something seems to be wrong with the Tor Bundler.\"", "raise", "Exception", "(", "errMsg", "+", "\" \"", "+", "str", "(", "sPCE", ")", ")", "return", "response" ]
Function that abstracts capturing a URL. This method rewrites the one from Wrapper. :param url: The URL to be processed. :return: The response in JSON format.
[ "Function", "that", "abstracts", "capturing", "a", "URL", ".", "This", "method", "rewrites", "the", "one", "from", "Wrapper", ".", ":", "param", "url", ":", "The", "URL", "to", "be", "processed", ".", ":", "return", ":", "The", "response", "in", "a", "Json", "format", "." ]
python
train
36.121951
msoulier/tftpy
tftpy/TftpStates.py
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L55-L80
def returnSupportedOptions(self, options):
    """This method takes a requested options list from a client, and returns
    the ones that are supported."""
    # We support the options blksize and tsize right now.
    # FIXME - put this somewhere else?
    accepted_options = {}
    for option in options:
        if option == 'blksize':
            # Make sure it's valid.
            if int(options[option]) > MAX_BLKSIZE:
                log.info("Client requested blksize greater than %d "
                         "setting to maximum" % MAX_BLKSIZE)
                accepted_options[option] = MAX_BLKSIZE
            elif int(options[option]) < MIN_BLKSIZE:
                log.info("Client requested blksize less than %d "
                         "setting to minimum" % MIN_BLKSIZE)
                accepted_options[option] = MIN_BLKSIZE
            else:
                accepted_options[option] = options[option]
        elif option == 'tsize':
            log.debug("tsize option is set")
            accepted_options['tsize'] = 0
        else:
            log.info("Dropping unsupported option '%s'" % option)
    log.debug("Returning these accepted options: %s", accepted_options)
    return accepted_options
[ "def", "returnSupportedOptions", "(", "self", ",", "options", ")", ":", "# We support the options blksize and tsize right now.", "# FIXME - put this somewhere else?", "accepted_options", "=", "{", "}", "for", "option", "in", "options", ":", "if", "option", "==", "'blksize'", ":", "# Make sure it's valid.", "if", "int", "(", "options", "[", "option", "]", ")", ">", "MAX_BLKSIZE", ":", "log", ".", "info", "(", "\"Client requested blksize greater than %d \"", "\"setting to maximum\"", "%", "MAX_BLKSIZE", ")", "accepted_options", "[", "option", "]", "=", "MAX_BLKSIZE", "elif", "int", "(", "options", "[", "option", "]", ")", "<", "MIN_BLKSIZE", ":", "log", ".", "info", "(", "\"Client requested blksize less than %d \"", "\"setting to minimum\"", "%", "MIN_BLKSIZE", ")", "accepted_options", "[", "option", "]", "=", "MIN_BLKSIZE", "else", ":", "accepted_options", "[", "option", "]", "=", "options", "[", "option", "]", "elif", "option", "==", "'tsize'", ":", "log", ".", "debug", "(", "\"tsize option is set\"", ")", "accepted_options", "[", "'tsize'", "]", "=", "0", "else", ":", "log", ".", "info", "(", "\"Dropping unsupported option '%s'\"", "%", "option", ")", "log", ".", "debug", "(", "\"Returning these accepted options: %s\"", ",", "accepted_options", ")", "return", "accepted_options" ]
This method takes a requested options list from a client, and returns the ones that are supported.
[ "This", "method", "takes", "a", "requested", "options", "list", "from", "a", "client", "and", "returns", "the", "ones", "that", "are", "supported", "." ]
python
train
49.807692
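The negotiation above clamps a client-requested blksize into tftpy's [MIN_BLKSIZE, MAX_BLKSIZE] window. The standalone sketch below reproduces just that clamping with the RFC 2348 limits (8 and 65464) hard-coded as assumptions; tftpy's actual constants may differ.

# Standalone sketch of the blksize clamping above; 8 and 65464 are the
# RFC 2348 bounds, assumed here in place of tftpy's MIN/MAX_BLKSIZE.
MIN_BLKSIZE = 8
MAX_BLKSIZE = 65464

def negotiate_blksize(requested):
    # Clamp the requested block size into the supported window.
    return max(MIN_BLKSIZE, min(int(requested), MAX_BLKSIZE))

print(negotiate_blksize('1428'))   # 1428 -- accepted unchanged
print(negotiate_blksize('99999'))  # 65464 -- clamped to maximum
print(negotiate_blksize('1'))      # 8 -- clamped to minimum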
csparpa/pyowm
pyowm/commons/tile.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/commons/tile.py#L71-L87
def geoocoords_to_tile_coords(cls, lon, lat, zoom):
    """
    Calculates the tile numbers corresponding to the specified geocoordinates
    at the specified zoom level

    Coordinates shall be provided in degrees and using the Mercator Projection
    (http://en.wikipedia.org/wiki/Mercator_projection)

    :param lon: longitude
    :type lon: int or float
    :param lat: latitude
    :type lat: int or float
    :param zoom: zoom level
    :type zoom: int
    :return: a tuple (x, y) containing the tile-coordinates
    """
    n = 2.0 ** zoom
    x = int((lon + 180.0) / 360.0 * n)
    y = int((1.0 - math.log(math.tan(math.radians(lat)) +
                            (1 / math.cos(math.radians(lat)))) / math.pi) / 2.0 * n)
    return x, y
[ "def", "geoocoords_to_tile_coords", "(", "cls", ",", "lon", ",", "lat", ",", "zoom", ")", ":", "n", "=", "2.0", "**", "zoom", "x", "=", "int", "(", "(", "lon", "+", "180.0", ")", "/", "360.0", "*", "n", ")", "y", "=", "int", "(", "(", "1.0", "-", "math", ".", "log", "(", "math", ".", "tan", "(", "math", ".", "radians", "(", "lat", ")", ")", "+", "(", "1", "/", "math", ".", "cos", "(", "math", ".", "radians", "(", "lat", ")", ")", ")", ")", "/", "math", ".", "pi", ")", "/", "2.0", "*", "n", ")", "return", "x", ",", "y" ]
Calculates the tile numbers corresponding to the specified geocoordinates at the specified zoom level Coordinates shall be provided in degrees and using the Mercator Projection (http://en.wikipedia.org/wiki/Mercator_projection) :param lon: longitude :type lon: int or float :param lat: latitude :type lat: int or float :param zoom: zoom level :type zoom: int :return: a tuple (x, y) containing the tile-coordinates
[ "Calculates", "the", "tile", "numbers", "corresponding", "to", "the", "specified", "geocoordinates", "at", "the", "specified", "zoom", "level", "Coordinates", "shall", "be", "provided", "in", "degrees", "and", "using", "the", "Mercator", "Projection", "(", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Mercator_projection", ")" ]
python
train
44.294118
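The tile arithmetic above is the standard slippy-map formula: with n = 2**zoom, x is (lon + 180)/360 * n, and y derives from the Mercator-projected latitude. A standalone version plus its inverse (tile index back to the tile's NW corner, the usual consistency check) fits in a few lines; the inverse function is my addition, not pyowm API.

# Standalone slippy-map tile math; tile_to_geocoords (the inverse, giving
# the tile's NW corner) is an added consistency check, not pyowm API.
import math

def geocoords_to_tile(lon, lat, zoom):
    n = 2.0 ** zoom
    x = int((lon + 180.0) / 360.0 * n)
    y = int((1.0 - math.log(math.tan(math.radians(lat)) +
                            1 / math.cos(math.radians(lat))) / math.pi) / 2.0 * n)
    return x, y

def tile_to_geocoords(x, y, zoom):
    n = 2.0 ** zoom
    lon = x / n * 360.0 - 180.0
    lat = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * y / n))))
    return lon, lat

x, y = geocoords_to_tile(12.49, 41.90, 10)  # coordinates near Rome
print(x, y, tile_to_geocoords(x, y, 10))    # tile indices and their NW corner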
RedHatInsights/insights-core
insights/parsers/oracle.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/oracle.py#L18-L41
def _parse_oracle(lines):
    """
    Performs the actual file parsing, returning a dict of the config
    values in a given Oracle DB config file.

    Despite their differences, the two filetypes are similar enough to
    allow identical parsing.
    """
    config = {}
    for line in get_active_lines(lines):
        # Check for NULL in line to begin control char removal
        if '\00' in line:
            line = cleanup.sub('', line)
        if '=' in line:
            (key, value) = line.split('=', 1)
            key = key.strip(whitespace + '"\'').lower()
            if ',' in line:
                value = [s.strip(whitespace + '"\'').lower() for s in value.split(',')]
            else:
                value = value.strip(whitespace + '"\'').lower()
            config[key] = value
    return config
[ "def", "_parse_oracle", "(", "lines", ")", ":", "config", "=", "{", "}", "for", "line", "in", "get_active_lines", "(", "lines", ")", ":", "# Check for NULL in line to begin control char removal", "if", "'\\00'", "in", "line", ":", "line", "=", "cleanup", ".", "sub", "(", "''", ",", "line", ")", "if", "'='", "in", "line", ":", "(", "key", ",", "value", ")", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "key", "=", "key", ".", "strip", "(", "whitespace", "+", "'\"\\''", ")", ".", "lower", "(", ")", "if", "','", "in", "line", ":", "value", "=", "[", "s", ".", "strip", "(", "whitespace", "+", "'\"\\''", ")", ".", "lower", "(", ")", "for", "s", "in", "value", ".", "split", "(", "','", ")", "]", "else", ":", "value", "=", "value", ".", "strip", "(", "whitespace", "+", "'\"\\''", ")", ".", "lower", "(", ")", "config", "[", "key", "]", "=", "value", "return", "config" ]
Performs the actual file parsing, returning a dict of the config values in a given Oracle DB config file. Despite their differences, the two filetypes are similar enough to allow identical parsing.
[ "Performs", "the", "actual", "file", "parsing", "returning", "a", "dict", "of", "the", "config", "values", "in", "a", "given", "Oracle", "DB", "config", "file", "." ]
python
train
33.125
openstack/networking-cisco
networking_cisco/apps/saf/agent/vdp/ovs_vdp.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/vdp/ovs_vdp.py#L205-L236
def _flow_check_handler_internal(self):
    """Periodic handler to check if installed flows are present.

    This handler runs periodically to check if installed flows are present.
    This function cannot detect and delete the stale flows, if present.
    It requires more complexity to delete stale flows. Generally, stale
    flows are not present. So, that logic is not put here.
    """
    integ_flow = self.integ_br_obj.dump_flows_for(
        in_port=self.int_peer_port_num)
    ext_flow = self.ext_br_obj.dump_flows_for(
        in_port=self.phy_peer_port_num)
    for net_uuid, lvm in six.iteritems(self.local_vlan_map):
        vdp_vlan = lvm.any_consistent_vlan()
        flow_required = False
        if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
            return
        if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on Integ bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on External bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if flow_required:
            LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
                     " %(vdp)s",
                     {'lvid': lvm.lvid, 'vdp': vdp_vlan})
            self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan)
[ "def", "_flow_check_handler_internal", "(", "self", ")", ":", "integ_flow", "=", "self", ".", "integ_br_obj", ".", "dump_flows_for", "(", "in_port", "=", "self", ".", "int_peer_port_num", ")", "ext_flow", "=", "self", ".", "ext_br_obj", ".", "dump_flows_for", "(", "in_port", "=", "self", ".", "phy_peer_port_num", ")", "for", "net_uuid", ",", "lvm", "in", "six", ".", "iteritems", "(", "self", ".", "local_vlan_map", ")", ":", "vdp_vlan", "=", "lvm", ".", "any_consistent_vlan", "(", ")", "flow_required", "=", "False", "if", "not", "(", "vdp_vlan", "and", "ovs_lib", ".", "is_valid_vlan_tag", "(", "vdp_vlan", ")", ")", ":", "return", "if", "not", "self", ".", "_check_bridge_flow", "(", "integ_flow", ",", "vdp_vlan", ",", "lvm", ".", "lvid", ")", ":", "LOG", ".", "error", "(", "\"Flow for VDP Vlan %(vdp_vlan)s, Local vlan \"", "\"%(lvid)s not present on Integ bridge\"", ",", "{", "'vdp_vlan'", ":", "vdp_vlan", ",", "'lvid'", ":", "lvm", ".", "lvid", "}", ")", "flow_required", "=", "True", "if", "not", "self", ".", "_check_bridge_flow", "(", "ext_flow", ",", "lvm", ".", "lvid", ",", "vdp_vlan", ")", ":", "LOG", ".", "error", "(", "\"Flow for VDP Vlan %(vdp_vlan)s, Local vlan \"", "\"%(lvid)s not present on External bridge\"", ",", "{", "'vdp_vlan'", ":", "vdp_vlan", ",", "'lvid'", ":", "lvm", ".", "lvid", "}", ")", "flow_required", "=", "True", "if", "flow_required", ":", "LOG", ".", "info", "(", "\"Programming flows for lvid %(lvid)s vdp vlan\"", "\" %(vdp)s\"", ",", "{", "'lvid'", ":", "lvm", ".", "lvid", ",", "'vdp'", ":", "vdp_vlan", "}", ")", "self", ".", "program_vm_ovs_flows", "(", "lvm", ".", "lvid", ",", "0", ",", "vdp_vlan", ")" ]
Periodic handler to check if installed flows are present. This handler runs periodically to check if installed flows are present. This function cannot detect and delete the stale flows, if present. It requires more complexity to delete stale flows. Generally, stale flows are not present. So, that logic is not put here.
[ "Periodic", "handler", "to", "check", "if", "installed", "flows", "are", "present", "." ]
python
train
53.9375
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2029-L2061
def com_google_fonts_check_metadata_italic_style(ttFont, font_metadata):
    """METADATA.pb font.style "italic" matches font internals?"""
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import MacStyle

    if font_metadata.style != "italic":
        yield SKIP, "This check only applies to italic fonts."
    else:
        font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
        if len(font_fullname) == 0:
            yield SKIP, "Font lacks fullname entries in name table."
            # this fail scenario was already checked above
            # (passing those previous checks is a prerequisite for this one)
            # FIXME: Could we pack this into a condition ?
        else:
            # FIXME: here we only check the first name entry.
            #        Should we iterate over them all ? Or should we check
            #        if they're all the same?
            font_fullname = font_fullname[0]

            if not bool(ttFont["head"].macStyle & MacStyle.ITALIC):
                yield FAIL, Message("bad-macstyle",
                                    "METADATA.pb style has been set to italic"
                                    " but font macStyle is improperly set.")
            elif not font_fullname.split("-")[-1].endswith("Italic"):
                yield FAIL, Message("bad-fullfont-name",
                                    ("Font macStyle Italic bit is set"
                                     " but nameID {} (\"{}\") is not ended with"
                                     " \"Italic\"").format(NameID.FULL_FONT_NAME,
                                                           font_fullname))
            else:
                yield PASS, ("OK: METADATA.pb font.style \"italic\""
                             " matches font internals.")
[ "def", "com_google_fonts_check_metadata_italic_style", "(", "ttFont", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "from", "fontbakery", ".", "constants", "import", "MacStyle", "if", "font_metadata", ".", "style", "!=", "\"italic\"", ":", "yield", "SKIP", ",", "\"This check only applies to italic fonts.\"", "else", ":", "font_fullname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", "if", "len", "(", "font_fullname", ")", "==", "0", ":", "yield", "SKIP", ",", "\"Font lacks fullname entries in name table.\"", "# this fail scenario was already checked above", "# (passing those previous checks is a prerequisite for this one)", "# FIXME: Could we pack this into a condition ?", "else", ":", "# FIXME: here we only check the first name entry.", "# Should we iterate over them all ? Or should we check", "# if they're all the same?", "font_fullname", "=", "font_fullname", "[", "0", "]", "if", "not", "bool", "(", "ttFont", "[", "\"head\"", "]", ".", "macStyle", "&", "MacStyle", ".", "ITALIC", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"bad-macstyle\"", ",", "\"METADATA.pb style has been set to italic\"", "\" but font macStyle is improperly set.\"", ")", "elif", "not", "font_fullname", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", ".", "endswith", "(", "\"Italic\"", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"bad-fullfont-name\"", ",", "(", "\"Font macStyle Italic bit is set\"", "\" but nameID {} (\\\"{}\\\") is not ended with\"", "\" \\\"Italic\\\"\"", ")", ".", "format", "(", "NameID", ".", "FULL_FONT_NAME", ",", "font_fullname", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"OK: METADATA.pb font.style \\\"italic\\\"\"", "\" matches font internals.\"", ")" ]
METADATA.pb font.style "italic" matches font internals?
[ "METADATA", ".", "pb", "font", ".", "style", "italic", "matches", "font", "internals?" ]
python
train
49.272727
alerta/python-alerta-client
alertaclient/commands/cmd_top.py
https://github.com/alerta/python-alerta-client/blob/7eb367b5fe87d5fc20b54dea8cddd7f09e251afa/alertaclient/commands/cmd_top.py#L9-L15
def cli(obj):
    """Display alerts like unix "top" command."""
    client = obj['client']
    timezone = obj['timezone']
    screen = Screen(client, timezone)
    screen.run()
[ "def", "cli", "(", "obj", ")", ":", "client", "=", "obj", "[", "'client'", "]", "timezone", "=", "obj", "[", "'timezone'", "]", "screen", "=", "Screen", "(", "client", ",", "timezone", ")", "screen", ".", "run", "(", ")" ]
Display alerts like unix "top" command.
[ "Display", "alerts", "like", "unix", "top", "command", "." ]
python
train
24.428571
watchforstock/evohome-client
evohomeclient2/__init__.py
https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L236-L243
def gateway(self):
    """Return the detail of the gateway."""
    url = 'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'
    response = requests.get(url, headers=self._headers())
    response.raise_for_status()
    return response.json()
[ "def", "gateway", "(", "self", ")", ":", "url", "=", "'https://tccna.honeywell.com/WebAPI/emea/api/v1/gateway'", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "_headers", "(", ")", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Return the detail of the gateway.
[ "Return", "the", "detail", "of", "the", "gateway", "." ]
python
train
32.625
Becksteinlab/GromacsWrapper
gromacs/setup.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/setup.py#L761-L822
def em_schedule(**kwargs):
    """Run multiple energy minimizations one after each other.

    :Keywords:
       *integrators*
           list of integrators (from 'l-bfgs', 'cg', 'steep')
           [['bfgs', 'steep']]
       *nsteps*
           list of maximum number of steps; one for each integrator in
           in the *integrators* list [[100,1000]]
       *kwargs*
           mostly passed to :func:`gromacs.setup.energy_minimize`

    :Returns: dictionary with paths to final structure ('struct') and
              other files

    :Example:
       Conduct three minimizations:
        1. low memory Broyden-Goldfarb-Fletcher-Shannon (BFGS) for 30 steps
        2. steepest descent for 200 steps
        3. finish with BFGS for another 30 steps

       We also do a multi-processor minimization when possible (i.e. for steep
       (and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class
       for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see
       :mod:`gromacs.run` for details)::

          import gromacs.run
          gromacs.setup.em_schedule(struct='solvate/ionized.gro',
                                    mdrunner=gromacs.run.MDrunnerOpenMP64,
                                    integrators=['l-bfgs', 'steep', 'l-bfgs'],
                                    nsteps=[50,200, 50])

    .. Note:: You might have to prepare the mdp file carefully because at the
              moment one can only modify the *nsteps* parameter on a
              per-minimizer basis.
    """

    mdrunner = kwargs.pop('mdrunner', None)
    integrators = kwargs.pop('integrators', ['l-bfgs', 'steep'])
    kwargs.pop('integrator', None)  # clean input; we set integrator from integrators
    nsteps = kwargs.pop('nsteps', [100, 1000])

    outputs = ['em{0:03d}_{1!s}.pdb'.format(i, integrator)
               for i, integrator in enumerate(integrators)]
    outputs[-1] = kwargs.pop('output', 'em.pdb')

    files = {'struct': kwargs.pop('struct', None)}  # fake output from energy_minimize()

    for i, integrator in enumerate(integrators):
        struct = files['struct']
        logger.info("[em %d] energy minimize with %s for maximum %d steps",
                    i, integrator, nsteps[i])
        kwargs.update({'struct': struct, 'output': outputs[i],
                       'integrator': integrator, 'nsteps': nsteps[i]})
        if not integrator == 'l-bfgs':
            kwargs['mdrunner'] = mdrunner
        else:
            kwargs['mdrunner'] = None
            logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot "
                           "do parallel runs.", i)
        files = energy_minimize(**kwargs)

    return files
[ "def", "em_schedule", "(", "*", "*", "kwargs", ")", ":", "mdrunner", "=", "kwargs", ".", "pop", "(", "'mdrunner'", ",", "None", ")", "integrators", "=", "kwargs", ".", "pop", "(", "'integrators'", ",", "[", "'l-bfgs'", ",", "'steep'", "]", ")", "kwargs", ".", "pop", "(", "'integrator'", ",", "None", ")", "# clean input; we set intgerator from integrators", "nsteps", "=", "kwargs", ".", "pop", "(", "'nsteps'", ",", "[", "100", ",", "1000", "]", ")", "outputs", "=", "[", "'em{0:03d}_{1!s}.pdb'", ".", "format", "(", "i", ",", "integrator", ")", "for", "i", ",", "integrator", "in", "enumerate", "(", "integrators", ")", "]", "outputs", "[", "-", "1", "]", "=", "kwargs", ".", "pop", "(", "'output'", ",", "'em.pdb'", ")", "files", "=", "{", "'struct'", ":", "kwargs", ".", "pop", "(", "'struct'", ",", "None", ")", "}", "# fake output from energy_minimize()", "for", "i", ",", "integrator", "in", "enumerate", "(", "integrators", ")", ":", "struct", "=", "files", "[", "'struct'", "]", "logger", ".", "info", "(", "\"[em %d] energy minimize with %s for maximum %d steps\"", ",", "i", ",", "integrator", ",", "nsteps", "[", "i", "]", ")", "kwargs", ".", "update", "(", "{", "'struct'", ":", "struct", ",", "'output'", ":", "outputs", "[", "i", "]", ",", "'integrator'", ":", "integrator", ",", "'nsteps'", ":", "nsteps", "[", "i", "]", "}", ")", "if", "not", "integrator", "==", "'l-bfgs'", ":", "kwargs", "[", "'mdrunner'", "]", "=", "mdrunner", "else", ":", "kwargs", "[", "'mdrunner'", "]", "=", "None", "logger", ".", "warning", "(", "\"[em %d] Not using mdrunner for L-BFGS because it cannot \"", "\"do parallel runs.\"", ",", "i", ")", "files", "=", "energy_minimize", "(", "*", "*", "kwargs", ")", "return", "files" ]
Run multiple energy minimizations one after each other. :Keywords: *integrators* list of integrators (from 'l-bfgs', 'cg', 'steep') [['bfgs', 'steep']] *nsteps* list of maximum number of steps; one for each integrator in in the *integrators* list [[100,1000]] *kwargs* mostly passed to :func:`gromacs.setup.energy_minimize` :Returns: dictionary with paths to final structure ('struct') and other files :Example: Conduct three minimizations: 1. low memory Broyden-Goldfarb-Fletcher-Shannon (BFGS) for 30 steps 2. steepest descent for 200 steps 3. finish with BFGS for another 30 steps We also do a multi-processor minimization when possible (i.e. for steep (and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see :mod:`gromacs.run` for details):: import gromacs.run gromacs.setup.em_schedule(struct='solvate/ionized.gro', mdrunner=gromacs.run.MDrunnerOpenMP64, integrators=['l-bfgs', 'steep', 'l-bfgs'], nsteps=[50,200, 50]) .. Note:: You might have to prepare the mdp file carefully because at the moment one can only modify the *nsteps* parameter on a per-minimizer basis.
[ "Run", "multiple", "energy", "minimizations", "one", "after", "each", "other", "." ]
python
valid
41.145161
h2oai/h2o-3
h2o-py/h2o/frame.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/frame.py#L2924-L2930
def toupper(self):
        """
        Translate characters from lower to upper case for a particular column.

        :returns: new H2OFrame with all strings in the current frame converted to uppercase.
        """
        return H2OFrame._expr(expr=ExprNode("toupper", self), cache=self._ex._cache)
[ "def", "toupper", "(", "self", ")", ":", "return", "H2OFrame", ".", "_expr", "(", "expr", "=", "ExprNode", "(", "\"toupper\"", ",", "self", ")", ",", "cache", "=", "self", ".", "_ex", ".", "_cache", ")" ]
Translate characters from lower to upper case for a particular column.

        :returns: new H2OFrame with all strings in the current frame converted to uppercase.
[ "Translate", "characters", "from", "lower", "to", "upper", "case", "for", "a", "particular", "column", "." ]
python
test
42.571429
caffeinehit/django-oauth2-provider
provider/views.py
https://github.com/caffeinehit/django-oauth2-provider/blob/6b5bc0d3ad706d2aaa47fa476f38406cddd01236/provider/views.py#L60-L68
def cache_data(self, request, data, key='params'): """ Cache data in the session store. :param request: :attr:`django.http.HttpRequest` :param data: Arbitrary data to store. :param key: `str` The key under which to store the data. """ request.session['%s:%s' % (constants.SESSION_KEY, key)] = data
[ "def", "cache_data", "(", "self", ",", "request", ",", "data", ",", "key", "=", "'params'", ")", ":", "request", ".", "session", "[", "'%s:%s'", "%", "(", "constants", ".", "SESSION_KEY", ",", "key", ")", "]", "=", "data" ]
Cache data in the session store. :param request: :attr:`django.http.HttpRequest` :param data: Arbitrary data to store. :param key: `str` The key under which to store the data.
[ "Cache", "data", "in", "the", "session", "store", "." ]
python
train
38.444444
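The pattern above simply namespaces values inside Django's session store. A minimal standalone sketch of the same idea, with a plain dict standing in for request.session and a made-up SESSION_KEY standing in for provider.constants.SESSION_KEY:

# Sketch only: dict in place of request.session, invented SESSION_KEY.
SESSION_KEY = 'oauth2'

def cache_data(session, data, key='params'):
    session['%s:%s' % (SESSION_KEY, key)] = data

session = {}
cache_data(session, {'client_id': 'abc'})
cache_data(session, 'xyz', key='state')
print(session)  # {'oauth2:params': {'client_id': 'abc'}, 'oauth2:state': 'xyz'}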
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L2187-L2198
def mark_offer_as_lose(self, offer_id):
        """
        Mark offer as lose

        :param offer_id: the offer id
        :return: Response
        """
        return self._create_put_request(
            resource=OFFERS,
            billomat_id=offer_id,
            command=LOSE,
        )
[ "def", "mark_offer_as_lose", "(", "self", ",", "offer_id", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "OFFERS", ",", "billomat_id", "=", "offer_id", ",", "command", "=", "LOSE", ",", ")" ]
Mark offer as lose

        :param offer_id: the offer id
        :return: Response
[ "Mark", "offer", "as", "lose" ]
python
train
23.583333
tensorflow/tensor2tensor
tensor2tensor/trax/trax.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trax.py#L336-L348
def _reshape_by_device_single(x, num_devices): """Reshape x into a shape [num_devices, ...].""" x_shape = list(x.shape) batch_size = x_shape[0] batch_size_per_device = batch_size // num_devices # We require that num_devices divides batch_size evenly. if batch_size_per_device * num_devices != batch_size: logging.fatal( "We require that num_devices[%d] divides batch_size[%d] evenly.", num_devices, batch_size) # New shape. new_shape_prefix = [num_devices, batch_size_per_device] return np.reshape(x, new_shape_prefix + x_shape[1:])
[ "def", "_reshape_by_device_single", "(", "x", ",", "num_devices", ")", ":", "x_shape", "=", "list", "(", "x", ".", "shape", ")", "batch_size", "=", "x_shape", "[", "0", "]", "batch_size_per_device", "=", "batch_size", "//", "num_devices", "# We require that num_devices divides batch_size evenly.", "if", "batch_size_per_device", "*", "num_devices", "!=", "batch_size", ":", "logging", ".", "fatal", "(", "\"We require that num_devices[%d] divides batch_size[%d] evenly.\"", ",", "num_devices", ",", "batch_size", ")", "# New shape.", "new_shape_prefix", "=", "[", "num_devices", ",", "batch_size_per_device", "]", "return", "np", ".", "reshape", "(", "x", ",", "new_shape_prefix", "+", "x_shape", "[", "1", ":", "]", ")" ]
Reshape x into a shape [num_devices, ...].
[ "Reshape", "x", "into", "a", "shape", "[", "num_devices", "...", "]", "." ]
python
train
42.923077
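The reshape above is plain array manipulation; here is a self-contained NumPy sketch of the same sharding step (the original operates on a JAX-style np and calls logging.fatal instead of raising):

import numpy as np

def reshape_by_device_single(x, num_devices):
    batch_size = x.shape[0]
    # num_devices must divide batch_size evenly, as in the original check.
    if batch_size % num_devices != 0:
        raise ValueError("num_devices=%d must divide batch_size=%d evenly"
                         % (num_devices, batch_size))
    return np.reshape(x, (num_devices, batch_size // num_devices) + x.shape[1:])

x = np.arange(24).reshape(8, 3)               # a batch of 8 examples, 3 features each
print(reshape_by_device_single(x, 4).shape)   # (4, 2, 3): 4 devices, 2 examples per device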
jonathf/chaospy
chaospy/descriptives/expected.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/descriptives/expected.py#L7-L69
def E(poly, dist=None, **kws): """ Expected value operator. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (Poly, Dist): Input to take expected value on. dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.E(dist)) [1. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.E(poly, dist)) [1. 1. 0. 0.] """ if not isinstance(poly, (distributions.Dist, polynomials.Poly)): print(type(poly)) print("Approximating expected value...") out = quadrature.quad(poly, dist, veceval=True, **kws) print("done") return out if isinstance(poly, distributions.Dist): dist, poly = poly, polynomials.variable(len(poly)) if not poly.keys: return numpy.zeros(poly.shape, dtype=int) if isinstance(poly, (list, tuple, numpy.ndarray)): return [E(_, dist, **kws) for _ in poly] if poly.dim < len(dist): poly = polynomials.setdim(poly, len(dist)) shape = poly.shape poly = polynomials.flatten(poly) keys = poly.keys mom = dist.mom(numpy.array(keys).T, **kws) A = poly.A if len(dist) == 1: mom = mom[0] out = numpy.zeros(poly.shape) for i in range(len(keys)): out += A[keys[i]]*mom[i] out = numpy.reshape(out, shape) return out
[ "def", "E", "(", "poly", ",", "dist", "=", "None", ",", "*", "*", "kws", ")", ":", "if", "not", "isinstance", "(", "poly", ",", "(", "distributions", ".", "Dist", ",", "polynomials", ".", "Poly", ")", ")", ":", "print", "(", "type", "(", "poly", ")", ")", "print", "(", "\"Approximating expected value...\"", ")", "out", "=", "quadrature", ".", "quad", "(", "poly", ",", "dist", ",", "veceval", "=", "True", ",", "*", "*", "kws", ")", "print", "(", "\"done\"", ")", "return", "out", "if", "isinstance", "(", "poly", ",", "distributions", ".", "Dist", ")", ":", "dist", ",", "poly", "=", "poly", ",", "polynomials", ".", "variable", "(", "len", "(", "poly", ")", ")", "if", "not", "poly", ".", "keys", ":", "return", "numpy", ".", "zeros", "(", "poly", ".", "shape", ",", "dtype", "=", "int", ")", "if", "isinstance", "(", "poly", ",", "(", "list", ",", "tuple", ",", "numpy", ".", "ndarray", ")", ")", ":", "return", "[", "E", "(", "_", ",", "dist", ",", "*", "*", "kws", ")", "for", "_", "in", "poly", "]", "if", "poly", ".", "dim", "<", "len", "(", "dist", ")", ":", "poly", "=", "polynomials", ".", "setdim", "(", "poly", ",", "len", "(", "dist", ")", ")", "shape", "=", "poly", ".", "shape", "poly", "=", "polynomials", ".", "flatten", "(", "poly", ")", "keys", "=", "poly", ".", "keys", "mom", "=", "dist", ".", "mom", "(", "numpy", ".", "array", "(", "keys", ")", ".", "T", ",", "*", "*", "kws", ")", "A", "=", "poly", ".", "A", "if", "len", "(", "dist", ")", "==", "1", ":", "mom", "=", "mom", "[", "0", "]", "out", "=", "numpy", ".", "zeros", "(", "poly", ".", "shape", ")", "for", "i", "in", "range", "(", "len", "(", "keys", ")", ")", ":", "out", "+=", "A", "[", "keys", "[", "i", "]", "]", "*", "mom", "[", "i", "]", "out", "=", "numpy", ".", "reshape", "(", "out", ",", "shape", ")", "return", "out" ]
Expected value operator. 1st order statistics of a probability distribution or polynomial on a given probability space. Args: poly (Poly, Dist): Input to take expected value on. dist (Dist): Defines the space the expected value is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): The expected value of the polynomial or distribution, where ``expected.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.E(dist)) [1. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.E(poly, dist)) [1. 1. 0. 0.]
[ "Expected", "value", "operator", "." ]
python
train
27.634921
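Since E() reduces to a moment-weighted sum of polynomial coefficients, the documented example can be sanity-checked by straight Monte Carlo with nothing but NumPy. This is only a cross-check, not how chaospy computes the values:

import numpy as np

rng = np.random.default_rng(0)
x = rng.gamma(shape=1.0, scale=1.0, size=200_000)   # Gamma(1, 1)
y = rng.normal(loc=0.0, scale=2.0, size=200_000)    # Normal(0, 2)

# E[1], E[x], E[y], E[10*x*y]; independence gives E[10xy] = 10*E[x]*E[y] = 0
for sample in (np.ones_like(x), x, y, 10 * x * y):
    print(round(sample.mean(), 2))   # ~1.0, ~1.0, ~0.0, ~0.0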
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/pefile.py#L663-L676
def ask_pascal_16(self, next_rva_ptr): """The next RVA is taken to be the one immediately following this one. Such RVA could indicate the natural end of the string and will be checked with the possible length contained in the first word. """ length = self.__get_pascal_16_length() if length == (next_rva_ptr - (self.rva_ptr+2)) / 2: self.length = length return True return False
[ "def", "ask_pascal_16", "(", "self", ",", "next_rva_ptr", ")", ":", "length", "=", "self", ".", "__get_pascal_16_length", "(", ")", "if", "length", "==", "(", "next_rva_ptr", "-", "(", "self", ".", "rva_ptr", "+", "2", ")", ")", "/", "2", ":", "self", ".", "length", "=", "length", "return", "True", "return", "False" ]
The next RVA is taken to be the one immediately following this one. Such RVA could indicate the natural end of the string and will be checked with the possible length contained in the first word.
[ "The", "next", "RVA", "is", "taken", "to", "be", "the", "one", "immediately", "following", "this", "one", ".", "Such", "RVA", "could", "indicate", "the", "natural", "end", "of", "the", "string", "and", "will", "be", "checked", "with", "the", "possible", "length", "contained", "in", "the", "first", "word", "." ]
python
train
34.357143
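The length check above compares a leading 16-bit character count against the room left before the next RVA. An illustrative standalone sketch of the same "Pascal UTF-16" layout (this is not pefile code):

import struct

def read_pascal_16(buf, offset=0):
    (count,) = struct.unpack_from('<H', buf, offset)   # leading word = char count
    start = offset + 2
    return buf[start:start + 2 * count].decode('utf-16-le')

data = struct.pack('<H', 5) + 'hello'.encode('utf-16-le')
print(read_pascal_16(data))        # hello
print(len(data) == 2 + 2 * 5)      # True -- the relation the RVA check exploits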
senaite/senaite.core
bika/lims/exportimport/instruments/sysmex/xt/i1800.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/sysmex/xt/i1800.py#L142-L155
def _handle_result_line(self, sline):
        """
        Parses the data line and adds to the dictionary.
        :param sline: a split data line to parse
        :returns: the number of rows to jump to parse the next data line,
                  or the error code -1
        """
        as_kw = sline[3]
        a_result = str(sline[5].split('^')[0])
        self._cur_values[as_kw] = {
            'DefaultResult': 'Result',
            'Result': a_result
        }
        return 0
[ "def", "_handle_result_line", "(", "self", ",", "sline", ")", ":", "as_kw", "=", "sline", "[", "3", "]", "a_result", "=", "str", "(", "sline", "[", "5", "]", ".", "split", "(", "'^'", ")", "[", "0", "]", ")", "self", ".", "_cur_values", "[", "as_kw", "]", "=", "{", "'DefaultResult'", ":", "'Result'", ",", "'Result'", ":", "a_result", "}", "return", "0" ]
Parses the data line and adds to the dictionary.
        :param sline: a split data line to parse
        :returns: the number of rows to jump to parse the next data line,
                  or the error code -1
[ "Parses", "the", "data", "line", "and", "adds", "to", "the", "dictionary", ".", ":", "param", "sline", ":", "a", "split", "data", "line", "to", "parse", ":", "returns", ":", "the", "number", "of", "rows", "to", "jump", "and", "parse", "the", "next", "data", "line", "or", "return", "the", "code", "error", "-", "1" ]
python
train
33.5
micha030201/aionationstates
aionationstates/nation_.py
https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/nation_.py#L502-L519
async def freedomscores(self, root): """Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as percentages. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys of str and values of int Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``. """ elem = root.find('FREEDOMSCORES') result = OrderedDict() result['Civil Rights'] = int(elem.find('CIVILRIGHTS').text) result['Economy'] = int(elem.find('ECONOMY').text) result['Political Freedom'] = int(elem.find('POLITICALFREEDOM').text) return result
[ "async", "def", "freedomscores", "(", "self", ",", "root", ")", ":", "elem", "=", "root", ".", "find", "(", "'FREEDOMSCORES'", ")", "result", "=", "OrderedDict", "(", ")", "result", "[", "'Civil Rights'", "]", "=", "int", "(", "elem", ".", "find", "(", "'CIVILRIGHTS'", ")", ".", "text", ")", "result", "[", "'Economy'", "]", "=", "int", "(", "elem", ".", "find", "(", "'ECONOMY'", ")", ".", "text", ")", "result", "[", "'Political Freedom'", "]", "=", "int", "(", "elem", ".", "find", "(", "'POLITICALFREEDOM'", ")", ".", "text", ")", "return", "result" ]
Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as percentages. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys of str and values of int Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``.
[ "Nation", "s", "Freedoms", ":", "three", "basic", "indicators", "of", "the", "nation", "s", "Civil", "Rights", "Economy", "and", "Political", "Freedom", "as", "percentages", "." ]
python
train
40
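The same extraction works on a mock payload with nothing but the standard library; the real coroutine receives root already parsed by the API wrapper, and the numbers below are invented:

import xml.etree.ElementTree as ET
from collections import OrderedDict

root = ET.fromstring(
    "<NATION><FREEDOMSCORES>"
    "<CIVILRIGHTS>62</CIVILRIGHTS><ECONOMY>71</ECONOMY>"
    "<POLITICALFREEDOM>55</POLITICALFREEDOM>"
    "</FREEDOMSCORES></NATION>")
elem = root.find('FREEDOMSCORES')
result = OrderedDict()
result['Civil Rights'] = int(elem.find('CIVILRIGHTS').text)
result['Economy'] = int(elem.find('ECONOMY').text)
result['Political Freedom'] = int(elem.find('POLITICALFREEDOM').text)
print(result)  # OrderedDict([('Civil Rights', 62), ('Economy', 71), ('Political Freedom', 55)])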
EnergieID/smappy
smappy/smappy.py
https://github.com/EnergieID/smappy/blob/1ada3abc9a51c76205c072369258f6f4f4e8fd0f/smappy/smappy.py#L276-L311
def get_events(self, service_location_id, appliance_id, start, end, max_number=None): """ Request events for a given appliance Parameters ---------- service_location_id : int appliance_id : int start : int | dt.datetime | pd.Timestamp end : int | dt.datetime | pd.Timestamp start and end support epoch (in milliseconds), datetime and Pandas Timestamp timezone-naive datetimes are assumed to be in UTC max_number : int, optional The maximum number of events that should be returned by this query Default returns all events in the selected period Returns ------- dict """ start = self._to_milliseconds(start) end = self._to_milliseconds(end) url = urljoin(URLS['servicelocation'], service_location_id, "events") headers = {"Authorization": "Bearer {}".format(self.access_token)} params = { "from": start, "to": end, "applianceId": appliance_id, "maxNumber": max_number } r = requests.get(url, headers=headers, params=params) r.raise_for_status() return r.json()
[ "def", "get_events", "(", "self", ",", "service_location_id", ",", "appliance_id", ",", "start", ",", "end", ",", "max_number", "=", "None", ")", ":", "start", "=", "self", ".", "_to_milliseconds", "(", "start", ")", "end", "=", "self", ".", "_to_milliseconds", "(", "end", ")", "url", "=", "urljoin", "(", "URLS", "[", "'servicelocation'", "]", ",", "service_location_id", ",", "\"events\"", ")", "headers", "=", "{", "\"Authorization\"", ":", "\"Bearer {}\"", ".", "format", "(", "self", ".", "access_token", ")", "}", "params", "=", "{", "\"from\"", ":", "start", ",", "\"to\"", ":", "end", ",", "\"applianceId\"", ":", "appliance_id", ",", "\"maxNumber\"", ":", "max_number", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "r", ".", "raise_for_status", "(", ")", "return", "r", ".", "json", "(", ")" ]
Request events for a given appliance Parameters ---------- service_location_id : int appliance_id : int start : int | dt.datetime | pd.Timestamp end : int | dt.datetime | pd.Timestamp start and end support epoch (in milliseconds), datetime and Pandas Timestamp timezone-naive datetimes are assumed to be in UTC max_number : int, optional The maximum number of events that should be returned by this query Default returns all events in the selected period Returns ------- dict
[ "Request", "events", "for", "a", "given", "appliance" ]
python
train
34.166667
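The endpoint takes epoch milliseconds, so the method leans on a _to_milliseconds helper that is not shown here. A plausible sketch of such a conversion, matching the docstring's note that naive datetimes are assumed to be UTC:

import datetime as dt

def to_milliseconds(value):
    if isinstance(value, dt.datetime):
        if value.tzinfo is None:
            value = value.replace(tzinfo=dt.timezone.utc)  # naive -> assume UTC
        return int(value.timestamp() * 1000)
    return int(value)  # already an epoch in milliseconds

print(to_milliseconds(dt.datetime(2023, 1, 1)))  # 1672531200000
print(to_milliseconds(1672531200000))            # unchanged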
tanghaibao/jcvi
jcvi/assembly/bambus.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/bambus.py#L22-L99
def scaffold(args): """ %prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings. """ from jcvi.formats.base import FileMerger from jcvi.formats.bed import mates from jcvi.formats.contig import frombed from jcvi.formats.fasta import join from jcvi.utils.iter import grouper p = OptionParser(scaffold.__doc__) p.set_rclip(rclip=1) p.add_option("--conf", help="BAMBUS configuration file [default: %default]") p.add_option("--prefix", default=False, action="store_true", help="Only keep links between IDs with same prefix [default: %default]") opts, args = p.parse_args(args) nargs = len(args) if nargs < 3 or nargs % 2 != 1: sys.exit(not p.print_help()) rclip = opts.rclip ctgfasta = args[0] duos = list(grouper(args[1:], 2)) trios = [] for fastafile, bedfile in duos: prefix = bedfile.rsplit(".", 1)[0] matefile = prefix + ".mates" matebedfile = matefile + ".bed" if need_update(bedfile, [matefile, matebedfile]): matesopt = [bedfile, "--lib", "--nointra", "--rclip={0}".format(rclip), "--cutoff={0}".format(opts.cutoff)] if opts.prefix: matesopt += ["--prefix"] matefile, matebedfile = mates(matesopt) trios.append((fastafile, matebedfile, matefile)) # Merge the readfasta, bedfile and matefile bbfasta, bbbed, bbmate = "bambus.reads.fasta", "bambus.bed", "bambus.mates" for files, outfile in zip(zip(*trios), (bbfasta, bbbed, bbmate)): FileMerger(files, outfile=outfile).merge(checkexists=True) ctgfile = "bambus.contig" idsfile = "bambus.ids" frombedInputs = [bbbed, ctgfasta, bbfasta] if need_update(frombedInputs, ctgfile): frombed(frombedInputs) inputfasta = "bambus.contigs.fasta" singletonfasta = "bambus.singletons.fasta" cmd = "faSomeRecords {0} {1} ".format(ctgfasta, idsfile) sh(cmd + inputfasta) sh(cmd + singletonfasta + " -exclude") # Run bambus prefix = "bambus" cmd = "goBambus -c {0} -m {1} -o {2}".format(ctgfile, bbmate, prefix) if opts.conf: cmd += " -C {0}".format(opts.conf) sh(cmd) cmd = "untangle -e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml".\ format(prefix) sh(cmd) final = "final" cmd = "printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib " \ "-merge -detail -oo -sum -o {1}".format(prefix, final) sh(cmd) oofile = final + ".oo" join([inputfasta, "--oo={0}".format(oofile)])
[ "def", "scaffold", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "FileMerger", "from", "jcvi", ".", "formats", ".", "bed", "import", "mates", "from", "jcvi", ".", "formats", ".", "contig", "import", "frombed", "from", "jcvi", ".", "formats", ".", "fasta", "import", "join", "from", "jcvi", ".", "utils", ".", "iter", "import", "grouper", "p", "=", "OptionParser", "(", "scaffold", ".", "__doc__", ")", "p", ".", "set_rclip", "(", "rclip", "=", "1", ")", "p", ".", "add_option", "(", "\"--conf\"", ",", "help", "=", "\"BAMBUS configuration file [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--prefix\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Only keep links between IDs with same prefix [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "nargs", "=", "len", "(", "args", ")", "if", "nargs", "<", "3", "or", "nargs", "%", "2", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "rclip", "=", "opts", ".", "rclip", "ctgfasta", "=", "args", "[", "0", "]", "duos", "=", "list", "(", "grouper", "(", "args", "[", "1", ":", "]", ",", "2", ")", ")", "trios", "=", "[", "]", "for", "fastafile", ",", "bedfile", "in", "duos", ":", "prefix", "=", "bedfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "matefile", "=", "prefix", "+", "\".mates\"", "matebedfile", "=", "matefile", "+", "\".bed\"", "if", "need_update", "(", "bedfile", ",", "[", "matefile", ",", "matebedfile", "]", ")", ":", "matesopt", "=", "[", "bedfile", ",", "\"--lib\"", ",", "\"--nointra\"", ",", "\"--rclip={0}\"", ".", "format", "(", "rclip", ")", ",", "\"--cutoff={0}\"", ".", "format", "(", "opts", ".", "cutoff", ")", "]", "if", "opts", ".", "prefix", ":", "matesopt", "+=", "[", "\"--prefix\"", "]", "matefile", ",", "matebedfile", "=", "mates", "(", "matesopt", ")", "trios", ".", "append", "(", "(", "fastafile", ",", "matebedfile", ",", "matefile", ")", ")", "# Merge the readfasta, bedfile and matefile", "bbfasta", ",", "bbbed", ",", "bbmate", "=", "\"bambus.reads.fasta\"", ",", "\"bambus.bed\"", ",", "\"bambus.mates\"", "for", "files", ",", "outfile", "in", "zip", "(", "zip", "(", "*", "trios", ")", ",", "(", "bbfasta", ",", "bbbed", ",", "bbmate", ")", ")", ":", "FileMerger", "(", "files", ",", "outfile", "=", "outfile", ")", ".", "merge", "(", "checkexists", "=", "True", ")", "ctgfile", "=", "\"bambus.contig\"", "idsfile", "=", "\"bambus.ids\"", "frombedInputs", "=", "[", "bbbed", ",", "ctgfasta", ",", "bbfasta", "]", "if", "need_update", "(", "frombedInputs", ",", "ctgfile", ")", ":", "frombed", "(", "frombedInputs", ")", "inputfasta", "=", "\"bambus.contigs.fasta\"", "singletonfasta", "=", "\"bambus.singletons.fasta\"", "cmd", "=", "\"faSomeRecords {0} {1} \"", ".", "format", "(", "ctgfasta", ",", "idsfile", ")", "sh", "(", "cmd", "+", "inputfasta", ")", "sh", "(", "cmd", "+", "singletonfasta", "+", "\" -exclude\"", ")", "# Run bambus", "prefix", "=", "\"bambus\"", "cmd", "=", "\"goBambus -c {0} -m {1} -o {2}\"", ".", "format", "(", "ctgfile", ",", "bbmate", ",", "prefix", ")", "if", "opts", ".", "conf", ":", "cmd", "+=", "\" -C {0}\"", ".", "format", "(", "opts", ".", "conf", ")", "sh", "(", "cmd", ")", "cmd", "=", "\"untangle -e {0}.evidence.xml -s {0}.out.xml -o {0}.untangle.xml\"", ".", "format", "(", "prefix", ")", "sh", "(", "cmd", ")", "final", "=", "\"final\"", "cmd", "=", "\"printScaff -e {0}.evidence.xml -s {0}.untangle.xml -l {0}.lib \"", 
"\"-merge -detail -oo -sum -o {1}\"", ".", "format", "(", "prefix", ",", "final", ")", "sh", "(", "cmd", ")", "oofile", "=", "final", "+", "\".oo\"", "join", "(", "[", "inputfasta", ",", "\"--oo={0}\"", ".", "format", "(", "oofile", ")", "]", ")" ]
%prog scaffold ctgfasta reads1.fasta mapping1.bed reads2.fasta mapping2.bed ... Run BAMBUS on set of contigs, reads and read mappings.
[ "%prog", "scaffold", "ctgfasta", "reads1", ".", "fasta", "mapping1", ".", "bed", "reads2", ".", "fasta", "mapping2", ".", "bed", "..." ]
python
train
33.961538
majerteam/sqla_inspect
sqla_inspect/py3o.py
https://github.com/majerteam/sqla_inspect/blob/67edb5541e6a56b0a657d3774d1e19c1110cd402/sqla_inspect/py3o.py#L205-L226
def gen_xml_doc(self): """ Generate the text tags that should be inserted in the content.xml of a full model """ res = self.make_doc() var_tag = """ <text:user-field-decl office:value-type="string" office:string-value="%s" text:name="py3o.%s"/>""" text_tag = """<text:p text:style-name="P1"> <text:user-field-get text:name="py3o.%s">%s</text:user-field-get> </text:p> """ keys = res.keys() keys.sort() texts = "" vars = "" for key in keys: value = res[key] vars += var_tag % (value, key) texts += text_tag % (key, value) return CONTENT_TMPL % (vars, texts)
[ "def", "gen_xml_doc", "(", "self", ")", ":", "res", "=", "self", ".", "make_doc", "(", ")", "var_tag", "=", "\"\"\"\n <text:user-field-decl office:value-type=\"string\"\n office:string-value=\"%s\" text:name=\"py3o.%s\"/>\"\"\"", "text_tag", "=", "\"\"\"<text:p text:style-name=\"P1\">\n <text:user-field-get text:name=\"py3o.%s\">%s</text:user-field-get>\n </text:p>\n \"\"\"", "keys", "=", "res", ".", "keys", "(", ")", "keys", ".", "sort", "(", ")", "texts", "=", "\"\"", "vars", "=", "\"\"", "for", "key", "in", "keys", ":", "value", "=", "res", "[", "key", "]", "vars", "+=", "var_tag", "%", "(", "value", ",", "key", ")", "texts", "+=", "text_tag", "%", "(", "key", ",", "value", ")", "return", "CONTENT_TMPL", "%", "(", "vars", ",", "texts", ")" ]
Generate the text tags that should be inserted in the content.xml of a full model
[ "Generate", "the", "text", "tags", "that", "should", "be", "inserted", "in", "the", "content", ".", "xml", "of", "a", "full", "model" ]
python
train
32.681818
isambard-uob/ampal
src/ampal/dssp.py
https://github.com/isambard-uob/ampal/blob/906e2afacb435ffb129b381f262ff8e7bfb324c5/src/ampal/dssp.py#L111-L163
def find_ss_regions(dssp_residues, loop_assignments=(' ', 'B', 'S', 'T')): """Separates parsed DSSP data into groups of secondary structure. Notes ----- Example: all residues in a single helix/loop/strand will be gathered into a list, then the next secondary structure element will be gathered into a separate list, and so on. Parameters ---------- dssp_residues : [tuple] Each internal list contains: [0] int Residue number [1] str Secondary structure type [2] str Chain identifier [3] str Residue type [4] float Phi torsion angle [5] float Psi torsion angle [6] int dssp solvent accessibility Returns ------- fragments : [[list]] Lists grouped in continuous regions of secondary structure. Innermost list has the same format as above. """ loops = loop_assignments previous_ele = None fragment = [] fragments = [] for ele in dssp_residues: if previous_ele is None: fragment.append(ele) elif ele[2] != previous_ele[2]: fragments.append(fragment) fragment = [ele] elif previous_ele[1] in loops: if ele[1] in loops: fragment.append(ele) else: fragments.append(fragment) fragment = [ele] else: if ele[1] == previous_ele[1]: fragment.append(ele) else: fragments.append(fragment) fragment = [ele] previous_ele = ele fragments.append(fragment) return fragments
[ "def", "find_ss_regions", "(", "dssp_residues", ",", "loop_assignments", "=", "(", "' '", ",", "'B'", ",", "'S'", ",", "'T'", ")", ")", ":", "loops", "=", "loop_assignments", "previous_ele", "=", "None", "fragment", "=", "[", "]", "fragments", "=", "[", "]", "for", "ele", "in", "dssp_residues", ":", "if", "previous_ele", "is", "None", ":", "fragment", ".", "append", "(", "ele", ")", "elif", "ele", "[", "2", "]", "!=", "previous_ele", "[", "2", "]", ":", "fragments", ".", "append", "(", "fragment", ")", "fragment", "=", "[", "ele", "]", "elif", "previous_ele", "[", "1", "]", "in", "loops", ":", "if", "ele", "[", "1", "]", "in", "loops", ":", "fragment", ".", "append", "(", "ele", ")", "else", ":", "fragments", ".", "append", "(", "fragment", ")", "fragment", "=", "[", "ele", "]", "else", ":", "if", "ele", "[", "1", "]", "==", "previous_ele", "[", "1", "]", ":", "fragment", ".", "append", "(", "ele", ")", "else", ":", "fragments", ".", "append", "(", "fragment", ")", "fragment", "=", "[", "ele", "]", "previous_ele", "=", "ele", "fragments", ".", "append", "(", "fragment", ")", "return", "fragments" ]
Separates parsed DSSP data into groups of secondary structure. Notes ----- Example: all residues in a single helix/loop/strand will be gathered into a list, then the next secondary structure element will be gathered into a separate list, and so on. Parameters ---------- dssp_residues : [tuple] Each internal list contains: [0] int Residue number [1] str Secondary structure type [2] str Chain identifier [3] str Residue type [4] float Phi torsion angle [5] float Psi torsion angle [6] int dssp solvent accessibility Returns ------- fragments : [[list]] Lists grouped in continuous regions of secondary structure. Innermost list has the same format as above.
[ "Separates", "parsed", "DSSP", "data", "into", "groups", "of", "secondary", "structure", "." ]
python
train
30.45283
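The hand-rolled state machine above is equivalent to grouping consecutive residues by chain plus "SS code or loop class". A compact itertools.groupby sketch of that rule, where mock 3-tuples stand in for the full 7-field records (only indices 1 and 2 matter for grouping):

from itertools import groupby

LOOPS = (' ', 'B', 'S', 'T')

def find_ss_regions_groupby(dssp_residues, loops=LOOPS):
    def key(ele):
        ss, chain = ele[1], ele[2]
        # all loop-class codes collapse to one group key per chain
        return (chain, 'loop' if ss in loops else ss)
    return [list(g) for _, g in groupby(dssp_residues, key=key)]

residues = [(1, 'H', 'A'), (2, 'H', 'A'), (3, ' ', 'A'), (4, 'T', 'A'), (5, 'H', 'B')]
print(find_ss_regions_groupby(residues))
# [[(1, 'H', 'A'), (2, 'H', 'A')], [(3, ' ', 'A'), (4, 'T', 'A')], [(5, 'H', 'B')]]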
alphatwirl/alphatwirl
alphatwirl/selection/factories/expand.py
https://github.com/alphatwirl/alphatwirl/blob/5138eeba6cd8a334ba52d6c2c022b33c61e3ba38/alphatwirl/selection/factories/expand.py#L4-L20
def expand_path_cfg(path_cfg, alias_dict={ }, overriding_kargs={ }): """expand a path config Args: path_cfg (str, tuple, dict): a config for path alias_dict (dict): a dict for aliases overriding_kargs (dict): to be used for recursive call """ if isinstance(path_cfg, str): return _expand_str(path_cfg, alias_dict, overriding_kargs) if isinstance(path_cfg, dict): return _expand_dict(path_cfg, alias_dict) # assume tuple or list return _expand_tuple(path_cfg, alias_dict, overriding_kargs)
[ "def", "expand_path_cfg", "(", "path_cfg", ",", "alias_dict", "=", "{", "}", ",", "overriding_kargs", "=", "{", "}", ")", ":", "if", "isinstance", "(", "path_cfg", ",", "str", ")", ":", "return", "_expand_str", "(", "path_cfg", ",", "alias_dict", ",", "overriding_kargs", ")", "if", "isinstance", "(", "path_cfg", ",", "dict", ")", ":", "return", "_expand_dict", "(", "path_cfg", ",", "alias_dict", ")", "# assume tuple or list", "return", "_expand_tuple", "(", "path_cfg", ",", "alias_dict", ",", "overriding_kargs", ")" ]
expand a path config Args: path_cfg (str, tuple, dict): a config for path alias_dict (dict): a dict for aliases overriding_kargs (dict): to be used for recursive call
[ "expand", "a", "path", "config" ]
python
valid
32
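The function is a type dispatcher over three private expanders. A toy, self-contained version with stub expanders shows the control flow; the stubs invent behavior, while the real _expand_* helpers resolve aliases and keyword overrides recursively:

def expand_path_cfg_toy(path_cfg, alias_dict={}, overriding_kargs={}):
    if isinstance(path_cfg, str):
        return alias_dict.get(path_cfg, path_cfg)                  # stub for _expand_str
    if isinstance(path_cfg, dict):
        return dict(path_cfg, **overriding_kargs)                  # stub for _expand_dict
    return [expand_path_cfg_toy(p, alias_dict) for p in path_cfg]  # stub for _expand_tuple

aliases = {'sel': {'All': True}}
print(expand_path_cfg_toy('sel', aliases))             # {'All': True}
print(expand_path_cfg_toy(('sel', 'other'), aliases))  # [{'All': True}, 'other']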
jxtech/wechatpy
wechatpy/client/api/customservice.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/customservice.py#L92-L110
def upload_headimg(self, account, media_file):
        """
        Upload the avatar for a customer service account
        For details, see
        http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :param account: the full customer service account
        :param media_file: the avatar file to upload, a File-Object
        :return: the JSON data returned by the API
        """
        return self._post(
            'https://api.weixin.qq.com/customservice/kfaccount/uploadheadimg',
            params={
                'kf_account': account
            },
            files={
                'media': media_file
            }
        )
[ "def", "upload_headimg", "(", "self", ",", "account", ",", "media_file", ")", ":", "return", "self", ".", "_post", "(", "'https://api.weixin.qq.com/customservice/kfaccount/uploadheadimg'", ",", "params", "=", "{", "'kf_account'", ":", "account", "}", ",", "files", "=", "{", "'media'", ":", "media_file", "}", ")" ]
Upload the avatar for a customer service account
        For details, see
        http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

        :param account: the full customer service account
        :param media_file: the avatar file to upload, a File-Object
        :return: the JSON data returned by the API
[ "上传客服账号头像", "详情请参考", "http", ":", "//", "mp", ".", "weixin", ".", "qq", ".", "com", "/", "wiki", "/", "1", "/", "70a29afed17f56d537c833f89be979c9", ".", "html" ]
python
train
28.052632
numenta/nupic
src/nupic/algorithms/utils.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/utils.py#L86-L103
def transferCoincidences(network, fromElementName, toElementName): """ Gets the coincidence matrix from one element and sets it on another element (using locked handles, a la nupic.bindings.research.lockHandle). TODO: Generalize to more node types, parameter name pairs, etc. Does not work across processes. """ coincidenceHandle = getLockedHandle( runtimeElement=network.getElement(fromElementName), # TODO: Re-purpose for use with nodes other than PMXClassifierNode. expression="self._cd._W" ) network.getElement(toElementName).setParameter("coincidencesAbove", coincidenceHandle)
[ "def", "transferCoincidences", "(", "network", ",", "fromElementName", ",", "toElementName", ")", ":", "coincidenceHandle", "=", "getLockedHandle", "(", "runtimeElement", "=", "network", ".", "getElement", "(", "fromElementName", ")", ",", "# TODO: Re-purpose for use with nodes other than PMXClassifierNode.", "expression", "=", "\"self._cd._W\"", ")", "network", ".", "getElement", "(", "toElementName", ")", ".", "setParameter", "(", "\"coincidencesAbove\"", ",", "coincidenceHandle", ")" ]
Gets the coincidence matrix from one element and sets it on another element (using locked handles, a la nupic.bindings.research.lockHandle). TODO: Generalize to more node types, parameter name pairs, etc. Does not work across processes.
[ "Gets", "the", "coincidence", "matrix", "from", "one", "element", "and", "sets", "it", "on", "another", "element", "(", "using", "locked", "handles", "a", "la", "nupic", ".", "bindings", ".", "research", ".", "lockHandle", ")", "." ]
python
valid
34.111111
chromy/essence
src/essence/world.py
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L56-L72
def add_component(self, entity, component): """Add component to entity. Long-hand for :func:`essence.Entity.add`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component: component to add to the entity :type component: :class:`essence.Component`""" component_type = type(component) relation = self._get_relation(component_type) if entity in relation: # PYTHON2.6: Numbers required in format string. msg = "Component {0} can't be added to entity {1} since it already has a component of type {2}.".format(component, entity, component_type) raise DuplicateComponentError(msg) relation[entity] = component self._entities_with(component_type).add(entity)
[ "def", "add_component", "(", "self", ",", "entity", ",", "component", ")", ":", "component_type", "=", "type", "(", "component", ")", "relation", "=", "self", ".", "_get_relation", "(", "component_type", ")", "if", "entity", "in", "relation", ":", "# PYTHON2.6: Numbers required in format string.", "msg", "=", "\"Component {0} can't be added to entity {1} since it already has a component of type {2}.\"", ".", "format", "(", "component", ",", "entity", ",", "component_type", ")", "raise", "DuplicateComponentError", "(", "msg", ")", "relation", "[", "entity", "]", "=", "component", "self", ".", "_entities_with", "(", "component_type", ")", ".", "add", "(", "entity", ")" ]
Add component to entity. Long-hand for :func:`essence.Entity.add`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component: component to add to the entity :type component: :class:`essence.Component`
[ "Add", "component", "to", "entity", "." ]
python
train
46.647059
hazelcast/hazelcast-python-client
hazelcast/protocol/codec/multi_map_add_entry_listener_codec.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/protocol/codec/multi_map_add_entry_listener_codec.py#L11-L17
def calculate_size(name, include_value, local_only): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += BOOLEAN_SIZE_IN_BYTES data_size += BOOLEAN_SIZE_IN_BYTES return data_size
[ "def", "calculate_size", "(", "name", ",", "include_value", ",", "local_only", ")", ":", "data_size", "=", "0", "data_size", "+=", "calculate_size_str", "(", "name", ")", "data_size", "+=", "BOOLEAN_SIZE_IN_BYTES", "data_size", "+=", "BOOLEAN_SIZE_IN_BYTES", "return", "data_size" ]
Calculates the request payload size
[ "Calculates", "the", "request", "payload", "size" ]
python
train
36
ejeschke/ginga
ginga/rv/plugins/PlotTable.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/PlotTable.py#L292-L329
def _get_plot_data(self): """Extract only good data point for plotting.""" _marker_type = self.settings.get('markerstyle', 'o') if self.x_col == self._idxname: x_data = self._idx else: x_data = self.tab[self.x_col].data if self.y_col == self._idxname: y_data = self._idx else: y_data = self.tab[self.y_col].data if self.tab.masked: if self.x_col == self._idxname: x_mask = np.ones_like(self._idx, dtype=np.bool) else: x_mask = ~self.tab[self.x_col].mask if self.y_col == self._idxname: y_mask = np.ones_like(self._idx, dtype=np.bool) else: y_mask = ~self.tab[self.y_col].mask mask = x_mask & y_mask x_data = x_data[mask] y_data = y_data[mask] if len(x_data) > 1: i = np.argsort(x_data) # Sort X-axis to avoid messy line plot x_data = x_data[i] y_data = y_data[i] if not self.w.show_marker.get_state(): _marker_type = None return x_data, y_data, _marker_type
[ "def", "_get_plot_data", "(", "self", ")", ":", "_marker_type", "=", "self", ".", "settings", ".", "get", "(", "'markerstyle'", ",", "'o'", ")", "if", "self", ".", "x_col", "==", "self", ".", "_idxname", ":", "x_data", "=", "self", ".", "_idx", "else", ":", "x_data", "=", "self", ".", "tab", "[", "self", ".", "x_col", "]", ".", "data", "if", "self", ".", "y_col", "==", "self", ".", "_idxname", ":", "y_data", "=", "self", ".", "_idx", "else", ":", "y_data", "=", "self", ".", "tab", "[", "self", ".", "y_col", "]", ".", "data", "if", "self", ".", "tab", ".", "masked", ":", "if", "self", ".", "x_col", "==", "self", ".", "_idxname", ":", "x_mask", "=", "np", ".", "ones_like", "(", "self", ".", "_idx", ",", "dtype", "=", "np", ".", "bool", ")", "else", ":", "x_mask", "=", "~", "self", ".", "tab", "[", "self", ".", "x_col", "]", ".", "mask", "if", "self", ".", "y_col", "==", "self", ".", "_idxname", ":", "y_mask", "=", "np", ".", "ones_like", "(", "self", ".", "_idx", ",", "dtype", "=", "np", ".", "bool", ")", "else", ":", "y_mask", "=", "~", "self", ".", "tab", "[", "self", ".", "y_col", "]", ".", "mask", "mask", "=", "x_mask", "&", "y_mask", "x_data", "=", "x_data", "[", "mask", "]", "y_data", "=", "y_data", "[", "mask", "]", "if", "len", "(", "x_data", ")", ">", "1", ":", "i", "=", "np", ".", "argsort", "(", "x_data", ")", "# Sort X-axis to avoid messy line plot", "x_data", "=", "x_data", "[", "i", "]", "y_data", "=", "y_data", "[", "i", "]", "if", "not", "self", ".", "w", ".", "show_marker", ".", "get_state", "(", ")", ":", "_marker_type", "=", "None", "return", "x_data", ",", "y_data", ",", "_marker_type" ]
Extract only good data points for plotting.
[ "Extract", "only", "good", "data", "point", "for", "plotting", "." ]
python
train
30.552632
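Stripped of the table plumbing, the method is a joint-mask filter followed by an argsort on X. The core in plain NumPy, with mock data and a boolean array standing in for ~x.mask & ~y.mask:

import numpy as np

x = np.array([3.0, 1.0, 2.0, 4.0])
y = np.array([30.0, 10.0, 20.0, 40.0])
good = np.array([True, True, False, True])   # stand-in for the combined mask

x, y = x[good], y[good]
order = np.argsort(x)                        # sort by X to avoid a messy line plot
print(x[order], y[order])                    # [1. 3. 4.] [10. 30. 40.]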
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L1734-L1750
def file_saved_in_other_editorstack(self, original_filename, filename): """ File was just saved in another editorstack, let's synchronize! This avoids file being automatically reloaded. The original filename is passed instead of an index in case the tabs on the editor stacks were moved and are now in a different order - see issue 5703. Filename is passed in case file was just saved as another name. """ index = self.has_filename(original_filename) if index is None: return finfo = self.data[index] finfo.newly_created = False finfo.filename = to_text_string(filename) finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
[ "def", "file_saved_in_other_editorstack", "(", "self", ",", "original_filename", ",", "filename", ")", ":", "index", "=", "self", ".", "has_filename", "(", "original_filename", ")", "if", "index", "is", "None", ":", "return", "finfo", "=", "self", ".", "data", "[", "index", "]", "finfo", ".", "newly_created", "=", "False", "finfo", ".", "filename", "=", "to_text_string", "(", "filename", ")", "finfo", ".", "lastmodified", "=", "QFileInfo", "(", "finfo", ".", "filename", ")", ".", "lastModified", "(", ")" ]
File was just saved in another editorstack, let's synchronize! This avoids file being automatically reloaded. The original filename is passed instead of an index in case the tabs on the editor stacks were moved and are now in a different order - see issue 5703. Filename is passed in case file was just saved as another name.
[ "File", "was", "just", "saved", "in", "another", "editorstack", "let", "s", "synchronize!", "This", "avoids", "file", "being", "automatically", "reloaded", ".", "The", "original", "filename", "is", "passed", "instead", "of", "an", "index", "in", "case", "the", "tabs", "on", "the", "editor", "stacks", "were", "moved", "and", "are", "now", "in", "a", "different", "order", "-", "see", "issue", "5703", ".", "Filename", "is", "passed", "in", "case", "file", "was", "just", "saved", "as", "another", "name", "." ]
python
train
44.529412
OSSOS/MOP
src/ossos/core/ossos/wcs.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/wcs.py#L389-L425
def parse_pv(header): """ Parses the PV array from an astropy FITS header. Args: header: astropy.io.fits.header.Header The header containing the PV values. Returns: cd: 2d array (list(list(float)) [[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]] Note that N depends on the order of the fit. For example, an order 3 fit goes up to PV?_10. """ order_fit = parse_order_fit(header) def parse_with_base(i): key_base = "PV%d_" % i pvi_x = [header[key_base + "0"]] def parse_range(lower, upper): for j in range(lower, upper + 1): pvi_x.append(header[key_base + str(j)]) if order_fit >= 1: parse_range(1, 3) if order_fit >= 2: parse_range(4, 6) if order_fit >= 3: parse_range(7, 10) return pvi_x return [parse_with_base(1), parse_with_base(2)]
[ "def", "parse_pv", "(", "header", ")", ":", "order_fit", "=", "parse_order_fit", "(", "header", ")", "def", "parse_with_base", "(", "i", ")", ":", "key_base", "=", "\"PV%d_\"", "%", "i", "pvi_x", "=", "[", "header", "[", "key_base", "+", "\"0\"", "]", "]", "def", "parse_range", "(", "lower", ",", "upper", ")", ":", "for", "j", "in", "range", "(", "lower", ",", "upper", "+", "1", ")", ":", "pvi_x", ".", "append", "(", "header", "[", "key_base", "+", "str", "(", "j", ")", "]", ")", "if", "order_fit", ">=", "1", ":", "parse_range", "(", "1", ",", "3", ")", "if", "order_fit", ">=", "2", ":", "parse_range", "(", "4", ",", "6", ")", "if", "order_fit", ">=", "3", ":", "parse_range", "(", "7", ",", "10", ")", "return", "pvi_x", "return", "[", "parse_with_base", "(", "1", ")", ",", "parse_with_base", "(", "2", ")", "]" ]
Parses the PV array from an astropy FITS header.

    Args:
      header: astropy.io.fits.header.Header
        The header containing the PV values.

    Returns:
      pv: 2d array (list(list(float)))
        [[PV1_0, PV1_1, ... PV1_N],
         [PV2_0, PV2_1, ... PV2_N]]

        Note that N depends on the order of the fit.  For example, an order 3
        fit goes up to PV?_10.
[ "Parses", "the", "PV", "array", "from", "an", "astropy", "FITS", "header", "." ]
python
train
24.648649
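The parser only needs mapping-style access, so its shape can be exercised with a plain dict in place of the FITS header. The values below are invented, and order 1 is used, so the keys run PV?_0 through PV?_3:

# Build a fake "header" holding linear (order 1) PV terms for both axes.
header = {'PV%d_%d' % (i, j): float(10 * i + j)
          for i in (1, 2) for j in range(4)}
order_fit = 1   # what parse_order_fit would report for a linear fit

def parse_with_base(i):
    vals = [header['PV%d_0' % i]]
    if order_fit >= 1:
        vals += [header['PV%d_%d' % (i, j)] for j in range(1, 4)]
    return vals

print([parse_with_base(1), parse_with_base(2)])
# [[10.0, 11.0, 12.0, 13.0], [20.0, 21.0, 22.0, 23.0]]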
wummel/dosage
scripts/comicgenesis.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/scripts/comicgenesis.py#L371-L407
def handle_url(url, session, res): """Parse one search result page.""" print("Parsing", url, file=sys.stderr) try: data = getPageContent(url, session) except IOError as msg: print("ERROR:", msg, file=sys.stderr) return for match in url_matcher.finditer(data): url = match.group(1) + '/' name = unescape(match.group(2)) name = asciify(name.replace('&', 'And').replace('@', 'At')) name = capfirst(name) if name in exclude_comics: continue if contains_case_insensitive(res, name): # we cannot handle two comics that only differ in case print("INFO: skipping possible duplicate", repr(name), file=sys.stderr) continue # find out how many images this comic has end = match.end() mo = num_matcher.search(data[end:]) if not mo: print("ERROR:", repr(data[end:end+300]), file=sys.stderr) continue num = int(mo.group(1)) url = url_overrides.get(name, url) try: if "/d/" not in url: check_robotstxt(url+"d/", session) else: check_robotstxt(url, session) except IOError: print("INFO: robots.txt denied for comicgenesis", repr(name)) continue else: res[name] = (url, num)
[ "def", "handle_url", "(", "url", ",", "session", ",", "res", ")", ":", "print", "(", "\"Parsing\"", ",", "url", ",", "file", "=", "sys", ".", "stderr", ")", "try", ":", "data", "=", "getPageContent", "(", "url", ",", "session", ")", "except", "IOError", "as", "msg", ":", "print", "(", "\"ERROR:\"", ",", "msg", ",", "file", "=", "sys", ".", "stderr", ")", "return", "for", "match", "in", "url_matcher", ".", "finditer", "(", "data", ")", ":", "url", "=", "match", ".", "group", "(", "1", ")", "+", "'/'", "name", "=", "unescape", "(", "match", ".", "group", "(", "2", ")", ")", "name", "=", "asciify", "(", "name", ".", "replace", "(", "'&'", ",", "'And'", ")", ".", "replace", "(", "'@'", ",", "'At'", ")", ")", "name", "=", "capfirst", "(", "name", ")", "if", "name", "in", "exclude_comics", ":", "continue", "if", "contains_case_insensitive", "(", "res", ",", "name", ")", ":", "# we cannot handle two comics that only differ in case", "print", "(", "\"INFO: skipping possible duplicate\"", ",", "repr", "(", "name", ")", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "# find out how many images this comic has", "end", "=", "match", ".", "end", "(", ")", "mo", "=", "num_matcher", ".", "search", "(", "data", "[", "end", ":", "]", ")", "if", "not", "mo", ":", "print", "(", "\"ERROR:\"", ",", "repr", "(", "data", "[", "end", ":", "end", "+", "300", "]", ")", ",", "file", "=", "sys", ".", "stderr", ")", "continue", "num", "=", "int", "(", "mo", ".", "group", "(", "1", ")", ")", "url", "=", "url_overrides", ".", "get", "(", "name", ",", "url", ")", "try", ":", "if", "\"/d/\"", "not", "in", "url", ":", "check_robotstxt", "(", "url", "+", "\"d/\"", ",", "session", ")", "else", ":", "check_robotstxt", "(", "url", ",", "session", ")", "except", "IOError", ":", "print", "(", "\"INFO: robots.txt denied for comicgenesis\"", ",", "repr", "(", "name", ")", ")", "continue", "else", ":", "res", "[", "name", "]", "=", "(", "url", ",", "num", ")" ]
Parse one search result page.
[ "Parse", "one", "search", "result", "page", "." ]
python
train
36.486486
angr/angr
angr/state_plugins/posix.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/posix.py#L553-L569
def dumps(self, fd, **kwargs): """ Returns the concrete content for a file descriptor. BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout, or stderr as a flat string. :param fd: A file descriptor. :return: The concrete content. :rtype: str """ if 0 <= fd <= 2: data = [self.stdin, self.stdout, self.stderr][fd].concretize(**kwargs) if type(data) is list: data = b''.join(data) return data return self.get_fd(fd).concretize(**kwargs)
[ "def", "dumps", "(", "self", ",", "fd", ",", "*", "*", "kwargs", ")", ":", "if", "0", "<=", "fd", "<=", "2", ":", "data", "=", "[", "self", ".", "stdin", ",", "self", ".", "stdout", ",", "self", ".", "stderr", "]", "[", "fd", "]", ".", "concretize", "(", "*", "*", "kwargs", ")", "if", "type", "(", "data", ")", "is", "list", ":", "data", "=", "b''", ".", "join", "(", "data", ")", "return", "data", "return", "self", ".", "get_fd", "(", "fd", ")", ".", "concretize", "(", "*", "*", "kwargs", ")" ]
Returns the concrete content for a file descriptor. BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout, or stderr as a flat string. :param fd: A file descriptor. :return: The concrete content. :rtype: str
[ "Returns", "the", "concrete", "content", "for", "a", "file", "descriptor", "." ]
python
train
36.235294
hydpy-dev/hydpy
hydpy/models/hland/hland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/hland/hland_model.py#L2184-L2231
def calc_qt_v1(self):
    """Calculate the total discharge after possible abstractions.

    Required control parameter:
      |Abstr|

    Required flux sequence:
      |OutUH|

    Calculated flux sequence:
      |QT|

    Basic equation:
        :math:`QT = max(OutUH - Abstr, 0)`

    Examples:

        Trying to abstract less than available, as much as available,
        and more than available results in:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> abstr(2.0)
        >>> fluxes.outuh = 2.0
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(1.0)
        >>> fluxes.outuh = 1.0
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(0.0)
        >>> fluxes.outuh = 0.5
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(0.0)

        Note that "negative abstractions" are allowed:

        >>> abstr(-2.0)
        >>> fluxes.outuh = 1.0
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(2.0)
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    flu.qt = max(flu.outuh-con.abstr, 0.)
[ "def", "calc_qt_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "flu", ".", "qt", "=", "max", "(", "flu", ".", "outuh", "-", "con", ".", "abstr", ",", "0.", ")" ]
Calculate the total discharge after possible abstractions.

    Required control parameter:
      |Abstr|

    Required flux sequence:
      |OutUH|

    Calculated flux sequence:
      |QT|

    Basic equation:
        :math:`QT = max(OutUH - Abstr, 0)`

    Examples:

        Trying to abstract less than available, as much as available,
        and more than available results in:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> abstr(2.0)
        >>> fluxes.outuh = 2.0
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(1.0)
        >>> fluxes.outuh = 1.0
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(0.0)
        >>> fluxes.outuh = 0.5
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(0.0)

        Note that "negative abstractions" are allowed:

        >>> abstr(-2.0)
        >>> fluxes.outuh = 1.0
        >>> model.calc_qt_v1()
        >>> fluxes.qt
        qt(2.0)
[ "Calculate", "the", "total", "discharge", "after", "possible", "abstractions", "." ]
python
train
23.3125
BernardFW/bernard
src/bernard/layers/stack.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/stack.py#L77-L89
def _make_index(self): """ Perform the index computation. It groups layers by type into a dictionary, to allow quick access. """ out = {} for layer in self._layers: cls = layer.__class__ out[cls] = out.get(cls, []) + [layer] return out
[ "def", "_make_index", "(", "self", ")", ":", "out", "=", "{", "}", "for", "layer", "in", "self", ".", "_layers", ":", "cls", "=", "layer", ".", "__class__", "out", "[", "cls", "]", "=", "out", ".", "get", "(", "cls", ",", "[", "]", ")", "+", "[", "layer", "]", "return", "out" ]
Perform the index computation. It groups layers by type into a dictionary, to allow quick access.
[ "Perform", "the", "index", "computation", ".", "It", "groups", "layers", "by", "type", "into", "a", "dictionary", "to", "allow", "quick", "access", "." ]
python
train
23.538462
Carreau/telemetry
telemetry/__init__.py
https://github.com/Carreau/telemetry/blob/6d456e982e3d7fd4eb6a8f43cd94925bb69ab855/telemetry/__init__.py#L47-L55
def collect_basic_info(): """ collect basic info about the system, os, python version... """ s = sys.version_info _collect(json.dumps({'sys.version_info':tuple(s)})) _collect(sys.version) return sys.version
[ "def", "collect_basic_info", "(", ")", ":", "s", "=", "sys", ".", "version_info", "_collect", "(", "json", ".", "dumps", "(", "{", "'sys.version_info'", ":", "tuple", "(", "s", ")", "}", ")", ")", "_collect", "(", "sys", ".", "version", ")", "return", "sys", ".", "version" ]
collect basic info about the system, os, python version...
[ "collect", "basic", "info", "about", "the", "system", "os", "python", "version", "..." ]
python
train
25.222222
wglass/zoonado
zoonado/connection.py
https://github.com/wglass/zoonado/blob/8f143b3dd26be88131356f731e7ca51809bc69cb/zoonado/connection.py#L196-L215
def abort(self, exception=exc.ConnectError): """ Aborts a connection and puts all pending futures into an error state. If ``sys.exc_info()`` is set (i.e. this is being called in an exception handler) then pending futures will have that exc info set. Otherwise the given ``exception`` parameter is used (defaults to ``ConnectError``). """ log.warn("Aborting connection to %s:%s", self.host, self.port) def abort_pending(f): exc_info = sys.exc_info() if any(exc_info): f.set_exc_info(exc_info) else: f.set_exception(exception(self.host, self.port)) for pending in self.drain_all_pending(): abort_pending(pending)
[ "def", "abort", "(", "self", ",", "exception", "=", "exc", ".", "ConnectError", ")", ":", "log", ".", "warn", "(", "\"Aborting connection to %s:%s\"", ",", "self", ".", "host", ",", "self", ".", "port", ")", "def", "abort_pending", "(", "f", ")", ":", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "if", "any", "(", "exc_info", ")", ":", "f", ".", "set_exc_info", "(", "exc_info", ")", "else", ":", "f", ".", "set_exception", "(", "exception", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "for", "pending", "in", "self", ".", "drain_all_pending", "(", ")", ":", "abort_pending", "(", "pending", ")" ]
Aborts a connection and puts all pending futures into an error state. If ``sys.exc_info()`` is set (i.e. this is being called in an exception handler) then pending futures will have that exc info set. Otherwise the given ``exception`` parameter is used (defaults to ``ConnectError``).
[ "Aborts", "a", "connection", "and", "puts", "all", "pending", "futures", "into", "an", "error", "state", "." ]
python
train
37.75
hammerlab/cohorts
cohorts/cohort.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L538-L570
def _hash_filter_fn(self, filter_fn, **kwargs): """ Construct string representing state of filter_fn Used to cache filtered variants or effects uniquely depending on filter fn values """ filter_fn_name = self._get_function_name(filter_fn, default="filter-none") logger.debug("Computing hash for filter_fn: {} with kwargs {}".format(filter_fn_name, str(dict(**kwargs)))) # hash function source code fn_source = str(dill.source.getsource(filter_fn)) pickled_fn_source = pickle.dumps(fn_source) ## encode as byte string hashed_fn_source = int(hashlib.sha1(pickled_fn_source).hexdigest(), 16) % (10 ** 11) # hash kwarg values kw_dict = dict(**kwargs) kw_hash = list() if not kw_dict: kw_hash = ["default"] else: [kw_hash.append("{}-{}".format(key, h)) for (key, h) in sorted(kw_dict.items())] # hash closure vars - for case where filter_fn is defined within closure of filter_fn closure = [] nonlocals = inspect.getclosurevars(filter_fn).nonlocals for (key, val) in nonlocals.items(): ## capture hash for any function within closure if inspect.isfunction(val): closure.append(self._hash_filter_fn(val)) closure.sort() # Sorted for file name consistency closure_str = "null" if len(closure) == 0 else "-".join(closure) # construct final string comprising hashed components hashed_fn = ".".join(["-".join([filter_fn_name, str(hashed_fn_source)]), ".".join(kw_hash), closure_str] ) return hashed_fn
[ "def", "_hash_filter_fn", "(", "self", ",", "filter_fn", ",", "*", "*", "kwargs", ")", ":", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ",", "default", "=", "\"filter-none\"", ")", "logger", ".", "debug", "(", "\"Computing hash for filter_fn: {} with kwargs {}\"", ".", "format", "(", "filter_fn_name", ",", "str", "(", "dict", "(", "*", "*", "kwargs", ")", ")", ")", ")", "# hash function source code", "fn_source", "=", "str", "(", "dill", ".", "source", ".", "getsource", "(", "filter_fn", ")", ")", "pickled_fn_source", "=", "pickle", ".", "dumps", "(", "fn_source", ")", "## encode as byte string", "hashed_fn_source", "=", "int", "(", "hashlib", ".", "sha1", "(", "pickled_fn_source", ")", ".", "hexdigest", "(", ")", ",", "16", ")", "%", "(", "10", "**", "11", ")", "# hash kwarg values", "kw_dict", "=", "dict", "(", "*", "*", "kwargs", ")", "kw_hash", "=", "list", "(", ")", "if", "not", "kw_dict", ":", "kw_hash", "=", "[", "\"default\"", "]", "else", ":", "[", "kw_hash", ".", "append", "(", "\"{}-{}\"", ".", "format", "(", "key", ",", "h", ")", ")", "for", "(", "key", ",", "h", ")", "in", "sorted", "(", "kw_dict", ".", "items", "(", ")", ")", "]", "# hash closure vars - for case where filter_fn is defined within closure of filter_fn", "closure", "=", "[", "]", "nonlocals", "=", "inspect", ".", "getclosurevars", "(", "filter_fn", ")", ".", "nonlocals", "for", "(", "key", ",", "val", ")", "in", "nonlocals", ".", "items", "(", ")", ":", "## capture hash for any function within closure", "if", "inspect", ".", "isfunction", "(", "val", ")", ":", "closure", ".", "append", "(", "self", ".", "_hash_filter_fn", "(", "val", ")", ")", "closure", ".", "sort", "(", ")", "# Sorted for file name consistency", "closure_str", "=", "\"null\"", "if", "len", "(", "closure", ")", "==", "0", "else", "\"-\"", ".", "join", "(", "closure", ")", "# construct final string comprising hashed components", "hashed_fn", "=", "\".\"", ".", "join", "(", "[", "\"-\"", ".", "join", "(", "[", "filter_fn_name", ",", "str", "(", "hashed_fn_source", ")", "]", ")", ",", "\".\"", ".", "join", "(", "kw_hash", ")", ",", "closure_str", "]", ")", "return", "hashed_fn" ]
Construct string representing state of filter_fn.
        Used to cache filtered variants or effects uniquely depending on filter fn values
[ "Construct", "string", "representing", "state", "of", "filter_fn", "Used", "to", "cache", "filtered", "variants", "or", "effects", "uniquely", "depending", "on", "filter", "fn", "values" ]
python
train
52.69697
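The core trick is hashing a function's source so cache keys change whenever the filter's code changes. Reduced to the standard library (the cohort version additionally folds in kwargs and closed-over functions, and fetches source via dill):

import hashlib
import inspect
import pickle

def hash_fn_source(fn):
    # hash the *source text*, so editing the body changes the cache key
    source = inspect.getsource(fn)
    digest = hashlib.sha1(pickle.dumps(source)).hexdigest()
    return int(digest, 16) % (10 ** 11)

def keep_passing(variant):      # toy filter function for the demo
    return variant.filter == 'PASS'

print(hash_fn_source(keep_passing))   # changes only when the function body changes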
mweb/appconfig
appconfig/appconfig.py
https://github.com/mweb/appconfig/blob/780c1fe3b2f537463a46e335186b7741add88a1e/appconfig/appconfig.py#L370-L407
def _format_message(value, line_length, indent="", first_indent=None):
    ''' Return a string with newlines so that the given string fits into
    this line length. At the start of the line the indent is added. This
    can be used for commenting the message out within a file or to indent
    your text. All \\t will be replaced with 4 spaces.

    @param value: The string to get as a commented multiline comment.
    @param line_length: The length of the line to fill.
    @param indent: The indent to use for printing or character to put in
        front
    @param first_indent: The first indent might be shorter. If None then
        the first line uses the normal indent as the rest of the string.

    @return: The string with newlines
    '''
    if indent.find('\t') >= 0:
        indent = indent.replace('\t', '    ')
    result = []
    if first_indent is None:
        first_indent = indent
    cindent = first_indent
    tmp = "*" * line_length
    for ele in value.split(' '):
        if ele.find('\t') >= 0:
            ele = ele.replace('\t', '    ')
        if (len(ele) + len(tmp)) >= line_length:
            result.append(tmp)
            tmp = '{0}{1}'.format(cindent, ele)
            cindent = indent
        else:
            tmp = "{0} {1}".format(tmp, ele)
    result.append(tmp)
    result = result[1:]
    return "\n".join(result)
[ "def", "_format_message", "(", "value", ",", "line_length", ",", "indent", "=", "\"\"", ",", "first_indent", "=", "None", ")", ":", "if", "indent", ".", "find", "(", "'\\t'", ")", ":", "indent", "=", "indent", ".", "replace", "(", "'\\t'", ",", "' '", ")", "result", "=", "[", "]", "if", "first_indent", "is", "None", ":", "first_indent", "=", "indent", "cindent", "=", "first_indent", "tmp", "=", "\"*\"", "*", "line_length", "for", "ele", "in", "value", ".", "split", "(", "' '", ")", ":", "if", "ele", ".", "find", "(", "'\\t'", ")", ">=", "0", ":", "ele", "=", "ele", ".", "replace", "(", "'\\t'", ",", "' '", ")", "if", "(", "len", "(", "ele", ")", "+", "len", "(", "tmp", ")", ")", ">=", "line_length", ":", "result", ".", "append", "(", "tmp", ")", "tmp", "=", "'{0}{1}'", ".", "format", "(", "cindent", ",", "ele", ")", "cindent", "=", "indent", "else", ":", "tmp", "=", "\"{0} {1}\"", ".", "format", "(", "tmp", ",", "ele", ")", "result", ".", "append", "(", "tmp", ")", "result", "=", "result", "[", "1", ":", "]", "return", "\"\\n\"", ".", "join", "(", "result", ")" ]
Return a string with newlines so that the given string fits into this line length. At the start of the line the indent is added. This can be used for commenting the message out within a file or to indent your text. All \\t will be replaced with 4 spaces.

@param value: The string to get as a commented multiline comment.
@param line_length: The length of the line to fill.
@param indent: The indent to use for printing or character to put in front
@param first_indent: The first indent might be shorter. If None then the first line uses the normal indent as the rest of the string.

@return: The string with newlines
[ "Return", "a", "string", "with", "newlines", "so", "that", "the", "given", "string", "fits", "into", "this", "line", "length", ".", "At", "the", "start", "of", "the", "line", "the", "indent", "is", "added", ".", "This", "can", "be", "used", "for", "commenting", "the", "message", "out", "within", "a", "file", "or", "to", "indent", "your", "text", "." ]
python
train
36.605263
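For the common case of this wrap-and-indent pattern, the standard library's textwrap behaves comparably; a small sketch (the message text is invented):

import textwrap

message = ('This is a long explanatory message that should be wrapped '
           'and prefixed so it can be pasted into a file as a comment.')
# initial_indent / subsequent_indent play the role of first_indent / indent
print(textwrap.fill(message, width=50, initial_indent='# ', subsequent_indent='# '))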
HolmesNL/confidence
confidence/models.py
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/models.py#L113-L161
def get(self, path, default=_NoDefault, as_type=None, resolve_references=True): """ Gets a value for the specified path. :param path: the configuration key to fetch a value for, steps separated by the separator supplied to the constructor (default ``.``) :param default: a value to return if no value is found for the supplied path (``None`` is allowed) :param as_type: an optional callable to apply to the value found for the supplied path (possibly raising exceptions of its own if the value can not be coerced to the expected type) :param resolve_references: whether to resolve references in values :return: the value associated with the supplied configuration key, if available, or a supplied default value if the key was not found :raises ConfigurationError: when no value was found for *path* and *default* was not provided or a reference could not be resolved """ value = self._source steps_taken = [] try: # walk through the values dictionary for step in path.split(self._separator): steps_taken.append(step) value = value[step] if as_type: return as_type(value) elif isinstance(value, Mapping): # create an instance of our current type, copying 'configured' properties / policies namespace = type(self)(separator=self._separator, missing=self._missing) namespace._source = value # carry the root object from namespace to namespace, references are always resolved from root namespace._root = self._root return namespace elif resolve_references and isinstance(value, str): # only resolve references in str-type values (the only way they can be expressed) return self._resolve(value) else: return value except ConfiguredReferenceError: # also a KeyError, but this one should bubble to caller raise except KeyError as e: if default is not _NoDefault: return default else: missing_key = self._separator.join(steps_taken) raise NotConfiguredError('no configuration for key {}'.format(missing_key), key=missing_key) from e
[ "def", "get", "(", "self", ",", "path", ",", "default", "=", "_NoDefault", ",", "as_type", "=", "None", ",", "resolve_references", "=", "True", ")", ":", "value", "=", "self", ".", "_source", "steps_taken", "=", "[", "]", "try", ":", "# walk through the values dictionary", "for", "step", "in", "path", ".", "split", "(", "self", ".", "_separator", ")", ":", "steps_taken", ".", "append", "(", "step", ")", "value", "=", "value", "[", "step", "]", "if", "as_type", ":", "return", "as_type", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "Mapping", ")", ":", "# create an instance of our current type, copying 'configured' properties / policies", "namespace", "=", "type", "(", "self", ")", "(", "separator", "=", "self", ".", "_separator", ",", "missing", "=", "self", ".", "_missing", ")", "namespace", ".", "_source", "=", "value", "# carry the root object from namespace to namespace, references are always resolved from root", "namespace", ".", "_root", "=", "self", ".", "_root", "return", "namespace", "elif", "resolve_references", "and", "isinstance", "(", "value", ",", "str", ")", ":", "# only resolve references in str-type values (the only way they can be expressed)", "return", "self", ".", "_resolve", "(", "value", ")", "else", ":", "return", "value", "except", "ConfiguredReferenceError", ":", "# also a KeyError, but this one should bubble to caller", "raise", "except", "KeyError", "as", "e", ":", "if", "default", "is", "not", "_NoDefault", ":", "return", "default", "else", ":", "missing_key", "=", "self", ".", "_separator", ".", "join", "(", "steps_taken", ")", "raise", "NotConfiguredError", "(", "'no configuration for key {}'", ".", "format", "(", "missing_key", ")", ",", "key", "=", "missing_key", ")", "from", "e" ]
Gets a value for the specified path. :param path: the configuration key to fetch a value for, steps separated by the separator supplied to the constructor (default ``.``) :param default: a value to return if no value is found for the supplied path (``None`` is allowed) :param as_type: an optional callable to apply to the value found for the supplied path (possibly raising exceptions of its own if the value can not be coerced to the expected type) :param resolve_references: whether to resolve references in values :return: the value associated with the supplied configuration key, if available, or a supplied default value if the key was not found :raises ConfigurationError: when no value was found for *path* and *default* was not provided or a reference could not be resolved
[ "Gets", "a", "value", "for", "the", "specified", "path", "." ]
python
train
49.673469
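At its core, get performs a separator-delimited walk over nested mappings; a self-contained sketch of just that walk, with hypothetical config data:

def get_path(source, path, default=None, separator='.'):
    # walk through the nested mapping, one key step at a time
    value = source
    for step in path.split(separator):
        try:
            value = value[step]
        except (KeyError, TypeError):
            return default
    return value

config = {'db': {'host': 'localhost', 'port': 5432}}
print(get_path(config, 'db.port'))          # 5432
print(get_path(config, 'db.user', 'n/a'))   # 'n/a'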
jldantas/libmft
libmft/attribute.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1909-L1920
def _from_binary_ace_header(cls, binary_stream): """See base class.""" ''' ACE Type - 1 ACE Control flags - 1 Size - 2 (includes header size) ''' type, control_flags, size = cls._REPR.unpack(binary_stream) nw_obj = cls((ACEType(type), ACEControlFlags(control_flags), size)) _MOD_LOGGER.debug("Attempted to unpack ACE Header from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
[ "def", "_from_binary_ace_header", "(", "cls", ",", "binary_stream", ")", ":", "''' ACE Type - 1\n ACE Control flags - 1\n Size - 2 (includes header size)\n '''", "type", ",", "control_flags", ",", "size", "=", "cls", ".", "_REPR", ".", "unpack", "(", "binary_stream", ")", "nw_obj", "=", "cls", "(", "(", "ACEType", "(", "type", ")", ",", "ACEControlFlags", "(", "control_flags", ")", ",", "size", ")", ")", "_MOD_LOGGER", ".", "debug", "(", "\"Attempted to unpack ACE Header from \\\"%s\\\"\\nResult: %s\"", ",", "binary_stream", ".", "tobytes", "(", ")", ",", "nw_obj", ")", "return", "nw_obj" ]
See base class.
[ "See", "base", "class", "." ]
python
train
35.916667
mozillazg/python-pinyin
pypinyin/core.py
https://github.com/mozillazg/python-pinyin/blob/b44756c852e0d2f50f251e3098cbbfef51774979/pypinyin/core.py#L155-L179
def phrase_pinyin(phrase, style, heteronym, errors='default', strict=True): """词语拼音转换. :param phrase: 词语 :param errors: 指定如何处理没有拼音的字符 :param strict: 是否严格遵照《汉语拼音方案》来处理声母和韵母 :return: 拼音列表 :rtype: list """ py = [] if phrase in PHRASES_DICT: py = deepcopy(PHRASES_DICT[phrase]) for idx, item in enumerate(py): if heteronym: py[idx] = _remove_dup_items([ _to_fixed(x, style=style, strict=strict) for x in item]) else: py[idx] = [_to_fixed(item[0], style=style, strict=strict)] else: for i in phrase: single = single_pinyin(i, style=style, heteronym=heteronym, errors=errors, strict=strict) if single: py.extend(single) return py
[ "def", "phrase_pinyin", "(", "phrase", ",", "style", ",", "heteronym", ",", "errors", "=", "'default'", ",", "strict", "=", "True", ")", ":", "py", "=", "[", "]", "if", "phrase", "in", "PHRASES_DICT", ":", "py", "=", "deepcopy", "(", "PHRASES_DICT", "[", "phrase", "]", ")", "for", "idx", ",", "item", "in", "enumerate", "(", "py", ")", ":", "if", "heteronym", ":", "py", "[", "idx", "]", "=", "_remove_dup_items", "(", "[", "_to_fixed", "(", "x", ",", "style", "=", "style", ",", "strict", "=", "strict", ")", "for", "x", "in", "item", "]", ")", "else", ":", "py", "[", "idx", "]", "=", "[", "_to_fixed", "(", "item", "[", "0", "]", ",", "style", "=", "style", ",", "strict", "=", "strict", ")", "]", "else", ":", "for", "i", "in", "phrase", ":", "single", "=", "single_pinyin", "(", "i", ",", "style", "=", "style", ",", "heteronym", "=", "heteronym", ",", "errors", "=", "errors", ",", "strict", "=", "strict", ")", "if", "single", ":", "py", ".", "extend", "(", "single", ")", "return", "py" ]
Phrase to pinyin conversion.

:param phrase: the phrase
:param errors: how to handle characters that have no pinyin
:param strict: whether to strictly follow《汉语拼音方案》(the Hanyu Pinyin scheme) when handling initials and finals
:return: list of pinyin
:rtype: list
[ "词语拼音转换", "." ]
python
train
32.76
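phrase_pinyin is an internal helper; in practice it is reached through pypinyin's public entry points. A usage sketch (exact output depends on the installed dictionary data):

from pypinyin import pinyin, Style

print(pinyin('中心'))                      # e.g. [['zhōng'], ['xīn']]
print(pinyin('中心', style=Style.TONE3))   # e.g. [['zhong1'], ['xin1']]
print(pinyin('中心', heteronym=True))      # one candidate list per character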
pandas-dev/pandas
pandas/core/dtypes/cast.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L486-L516
def maybe_infer_dtype_type(element): """Try to infer an object's dtype, for use in arithmetic ops Uses `element.dtype` if that's available. Objects implementing the iterator protocol are cast to a NumPy array, and from there the array's type is used. Parameters ---------- element : object Possibly has a `.dtype` attribute, and possibly the iterator protocol. Returns ------- tipo : type Examples -------- >>> from collections import namedtuple >>> Foo = namedtuple("Foo", "dtype") >>> maybe_infer_dtype_type(Foo(np.dtype("i8"))) numpy.int64 """ tipo = None if hasattr(element, 'dtype'): tipo = element.dtype elif is_list_like(element): element = np.asarray(element) tipo = element.dtype return tipo
[ "def", "maybe_infer_dtype_type", "(", "element", ")", ":", "tipo", "=", "None", "if", "hasattr", "(", "element", ",", "'dtype'", ")", ":", "tipo", "=", "element", ".", "dtype", "elif", "is_list_like", "(", "element", ")", ":", "element", "=", "np", ".", "asarray", "(", "element", ")", "tipo", "=", "element", ".", "dtype", "return", "tipo" ]
Try to infer an object's dtype, for use in arithmetic ops Uses `element.dtype` if that's available. Objects implementing the iterator protocol are cast to a NumPy array, and from there the array's type is used. Parameters ---------- element : object Possibly has a `.dtype` attribute, and possibly the iterator protocol. Returns ------- tipo : type Examples -------- >>> from collections import namedtuple >>> Foo = namedtuple("Foo", "dtype") >>> maybe_infer_dtype_type(Foo(np.dtype("i8"))) numpy.int64
[ "Try", "to", "infer", "an", "object", "s", "dtype", "for", "use", "in", "arithmetic", "ops" ]
python
train
25.741935
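A standalone re-implementation to show the same behavior outside pandas internals (the list-like check is simplified here; exact dtype names vary by platform):

import numpy as np

def infer_dtype_type(element):
    if hasattr(element, 'dtype'):
        return element.dtype
    if hasattr(element, '__iter__') and not isinstance(element, str):
        return np.asarray(element).dtype
    return None

print(infer_dtype_type(np.array([1, 2])))   # int64 on most platforms
print(infer_dtype_type([1.0, 2.0]))         # float64
print(infer_dtype_type(3))                  # None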
razor-x/scipy-data_fitting
scipy_data_fitting/figure/plot.py
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/figure/plot.py#L103-L110
def add_ylabel(self, text=None): """ Add a label to the y-axis. """ y = self.fit.meta['dependent'] if not text: text = '$' + y['tex_symbol'] + r'$ $(\si{' + y['siunitx'] + r'})$' self.plt.set_ylabel(text)
[ "def", "add_ylabel", "(", "self", ",", "text", "=", "None", ")", ":", "y", "=", "self", ".", "fit", ".", "meta", "[", "'dependent'", "]", "if", "not", "text", ":", "text", "=", "'$'", "+", "y", "[", "'tex_symbol'", "]", "+", "r'$ $(\\si{'", "+", "y", "[", "'siunitx'", "]", "+", "r'})$'", "self", ".", "plt", ".", "set_ylabel", "(", "text", ")" ]
Add a label to the y-axis.
[ "Add", "a", "label", "to", "the", "y", "-", "axis", "." ]
python
train
32.25
wtolson/gnsq
gnsq/nsqd.py
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/nsqd.py#L682-L687
def publish_tcp(self, topic, data, **kwargs): """Use :meth:`NsqdTCPClient.publish` instead. .. deprecated:: 1.0.0 """ return self.__tcp_client.publish(topic, data, **kwargs)
[ "def", "publish_tcp", "(", "self", ",", "topic", ",", "data", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__tcp_client", ".", "publish", "(", "topic", ",", "data", ",", "*", "*", "kwargs", ")" ]
Use :meth:`NsqdTCPClient.publish` instead. .. deprecated:: 1.0.0
[ "Use", ":", "meth", ":", "NsqdTCPClient", ".", "publish", "instead", "." ]
python
train
33.5
zagaran/mongolia
mongolia/mongo_connection.py
https://github.com/zagaran/mongolia/blob/82c499345f0a8610c7289545e19f5f633e8a81c0/mongolia/mongo_connection.py#L64-L71
def connect(self, host=None, port=None, connect=False, **kwargs): """ Explicitly creates the MongoClient; this method must be used in order to specify a non-default host or port to the MongoClient. Takes arguments identical to MongoClient.__init__""" try: self.__connection = MongoClient(host=host, port=port, connect=connect, **kwargs) except (AutoReconnect, ConnectionFailure, ServerSelectionTimeoutError): raise DatabaseIsDownError("No mongod process is running.")
[ "def", "connect", "(", "self", ",", "host", "=", "None", ",", "port", "=", "None", ",", "connect", "=", "False", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "__connection", "=", "MongoClient", "(", "host", "=", "host", ",", "port", "=", "port", ",", "connect", "=", "connect", ",", "*", "*", "kwargs", ")", "except", "(", "AutoReconnect", ",", "ConnectionFailure", ",", "ServerSelectionTimeoutError", ")", ":", "raise", "DatabaseIsDownError", "(", "\"No mongod process is running.\"", ")" ]
Explicitly creates the MongoClient; this method must be used in order to specify a non-default host or port to the MongoClient. Takes arguments identical to MongoClient.__init__
[ "Explicitly", "creates", "the", "MongoClient", ";", "this", "method", "must", "be", "used", "in", "order", "to", "specify", "a", "non", "-", "default", "host", "or", "port", "to", "the", "MongoClient", ".", "Takes", "arguments", "identical", "to", "MongoClient", ".", "__init__" ]
python
train
66.5
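The same connect-then-verify pattern sketched directly against pymongo — MongoClient defers connecting when connect=False, so a ping is needed to surface a dead server (host and port below are assumptions):

from pymongo import MongoClient
from pymongo.errors import ConnectionFailure

client = MongoClient(host='localhost', port=27017, connect=False,
                     serverSelectionTimeoutMS=2000)
try:
    client.admin.command('ping')   # forces an actual round trip
except ConnectionFailure:
    print('No mongod process is running.')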
hydraplatform/hydra-base
hydra_base/lib/template.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L71-L102
def get_types_by_attr(resource, template_id=None): """ Using the attributes of the resource, get all the types that this resource matches. @returns a dictionary, keyed on the template name, with the value being the list of type names which match the resources attributes. """ resource_type_templates = [] #Create a list of all of this resources attributes. attr_ids = [] for res_attr in resource.attributes: attr_ids.append(res_attr.attr_id) all_resource_attr_ids = set(attr_ids) all_types = db.DBSession.query(TemplateType).options(joinedload_all('typeattrs')).filter(TemplateType.resource_type==resource.ref_key) if template_id is not None: all_types = all_types.filter(TemplateType.template_id==template_id) all_types = all_types.all() #tmpl type attrs must be a subset of the resource's attrs for ttype in all_types: type_attr_ids = [] for typeattr in ttype.typeattrs: type_attr_ids.append(typeattr.attr_id) if set(type_attr_ids).issubset(all_resource_attr_ids): resource_type_templates.append(ttype) return resource_type_templates
[ "def", "get_types_by_attr", "(", "resource", ",", "template_id", "=", "None", ")", ":", "resource_type_templates", "=", "[", "]", "#Create a list of all of this resources attributes.", "attr_ids", "=", "[", "]", "for", "res_attr", "in", "resource", ".", "attributes", ":", "attr_ids", ".", "append", "(", "res_attr", ".", "attr_id", ")", "all_resource_attr_ids", "=", "set", "(", "attr_ids", ")", "all_types", "=", "db", ".", "DBSession", ".", "query", "(", "TemplateType", ")", ".", "options", "(", "joinedload_all", "(", "'typeattrs'", ")", ")", ".", "filter", "(", "TemplateType", ".", "resource_type", "==", "resource", ".", "ref_key", ")", "if", "template_id", "is", "not", "None", ":", "all_types", "=", "all_types", ".", "filter", "(", "TemplateType", ".", "template_id", "==", "template_id", ")", "all_types", "=", "all_types", ".", "all", "(", ")", "#tmpl type attrs must be a subset of the resource's attrs", "for", "ttype", "in", "all_types", ":", "type_attr_ids", "=", "[", "]", "for", "typeattr", "in", "ttype", ".", "typeattrs", ":", "type_attr_ids", ".", "append", "(", "typeattr", ".", "attr_id", ")", "if", "set", "(", "type_attr_ids", ")", ".", "issubset", "(", "all_resource_attr_ids", ")", ":", "resource_type_templates", ".", "append", "(", "ttype", ")", "return", "resource_type_templates" ]
Using the attributes of the resource, get all the types that this resource matches. @returns a dictionary, keyed on the template name, with the value being the list of type names which match the resources attributes.
[ "Using", "the", "attributes", "of", "the", "resource", "get", "all", "the", "types", "that", "this", "resource", "matches", "." ]
python
train
36.4375
joke2k/faker
faker/providers/address/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/address/__init__.py#L52-L57
def street_name(self): """ :example 'Crist Parks' """ pattern = self.random_element(self.street_name_formats) return self.generator.parse(pattern)
[ "def", "street_name", "(", "self", ")", ":", "pattern", "=", "self", ".", "random_element", "(", "self", ".", "street_name_formats", ")", "return", "self", ".", "generator", ".", "parse", "(", "pattern", ")" ]
:example 'Crist Parks'
[ ":", "example", "Crist", "Parks" ]
python
train
30.166667
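Usage through Faker's public interface; outputs are random unless seeded:

from faker import Faker

Faker.seed(0)   # optional: makes the sample reproducible
fake = Faker()
print(fake.street_name())   # e.g. 'Chang Branch' (locale-dependent)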
hsolbrig/PyShEx
pyshex/shape_expressions_language/p5_2_validation_definition.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p5_2_validation_definition.py#L14-L53
def isValid(cntxt: Context, m: FixedShapeMap) -> Tuple[bool, List[str]]: """`5.2 Validation Definition <http://shex.io/shex-semantics/#validation>`_ The expression isValid(G, m) indicates that for every nodeSelector/shapeLabel pair (n, s) in m, s has a corresponding shape expression se and satisfies(n, se, G, m). satisfies is defined below for each form of shape expression :param cntxt: evaluation context - includes graph and schema :param m: list of NodeShape pairs to test :return: Success/failure indicator and, if fail, a list of failure reasons """ if not cntxt.is_valid: return False, cntxt.error_list parse_nodes = [] for nodeshapepair in m: n = nodeshapepair.nodeSelector if not isinstance_(n, Node): return False, [f"{n}: Triple patterns are not implemented"] # The third test below is because the spec asserts that completely empty graphs pass in certain circumstances elif not (next(cntxt.graph.predicate_objects(nodeshapepair.nodeSelector), None) or next(cntxt.graph.subject_predicates(nodeshapepair.nodeSelector), None) or not next(cntxt.graph.triples((None, None, None)), None)): return False, [f"Focus: {nodeshapepair.nodeSelector} not in graph"] else: s = cntxt.shapeExprFor(START if nodeshapepair.shapeLabel is None or nodeshapepair.shapeLabel is START else nodeshapepair.shapeLabel) cntxt.current_node = ParseNode(satisfies, s, n, cntxt) if not s: if nodeshapepair.shapeLabel is START or nodeshapepair.shapeLabel is None: cntxt.fail_reason = "START node is not specified or is invalid" else: cntxt.fail_reason = f"Shape: {nodeshapepair.shapeLabel} not found in Schema" return False, cntxt.process_reasons() parse_nodes.append(cntxt.current_node) if not satisfies(cntxt, n, s): cntxt.current_node.result = False return False, cntxt.process_reasons() else: cntxt.current_node.result = True return True, []
[ "def", "isValid", "(", "cntxt", ":", "Context", ",", "m", ":", "FixedShapeMap", ")", "->", "Tuple", "[", "bool", ",", "List", "[", "str", "]", "]", ":", "if", "not", "cntxt", ".", "is_valid", ":", "return", "False", ",", "cntxt", ".", "error_list", "parse_nodes", "=", "[", "]", "for", "nodeshapepair", "in", "m", ":", "n", "=", "nodeshapepair", ".", "nodeSelector", "if", "not", "isinstance_", "(", "n", ",", "Node", ")", ":", "return", "False", ",", "[", "f\"{n}: Triple patterns are not implemented\"", "]", "# The third test below is because the spec asserts that completely empty graphs pass in certain circumstances", "elif", "not", "(", "next", "(", "cntxt", ".", "graph", ".", "predicate_objects", "(", "nodeshapepair", ".", "nodeSelector", ")", ",", "None", ")", "or", "next", "(", "cntxt", ".", "graph", ".", "subject_predicates", "(", "nodeshapepair", ".", "nodeSelector", ")", ",", "None", ")", "or", "not", "next", "(", "cntxt", ".", "graph", ".", "triples", "(", "(", "None", ",", "None", ",", "None", ")", ")", ",", "None", ")", ")", ":", "return", "False", ",", "[", "f\"Focus: {nodeshapepair.nodeSelector} not in graph\"", "]", "else", ":", "s", "=", "cntxt", ".", "shapeExprFor", "(", "START", "if", "nodeshapepair", ".", "shapeLabel", "is", "None", "or", "nodeshapepair", ".", "shapeLabel", "is", "START", "else", "nodeshapepair", ".", "shapeLabel", ")", "cntxt", ".", "current_node", "=", "ParseNode", "(", "satisfies", ",", "s", ",", "n", ",", "cntxt", ")", "if", "not", "s", ":", "if", "nodeshapepair", ".", "shapeLabel", "is", "START", "or", "nodeshapepair", ".", "shapeLabel", "is", "None", ":", "cntxt", ".", "fail_reason", "=", "\"START node is not specified or is invalid\"", "else", ":", "cntxt", ".", "fail_reason", "=", "f\"Shape: {nodeshapepair.shapeLabel} not found in Schema\"", "return", "False", ",", "cntxt", ".", "process_reasons", "(", ")", "parse_nodes", ".", "append", "(", "cntxt", ".", "current_node", ")", "if", "not", "satisfies", "(", "cntxt", ",", "n", ",", "s", ")", ":", "cntxt", ".", "current_node", ".", "result", "=", "False", "return", "False", ",", "cntxt", ".", "process_reasons", "(", ")", "else", ":", "cntxt", ".", "current_node", ".", "result", "=", "True", "return", "True", ",", "[", "]" ]
`5.2 Validation Definition <http://shex.io/shex-semantics/#validation>`_ The expression isValid(G, m) indicates that for every nodeSelector/shapeLabel pair (n, s) in m, s has a corresponding shape expression se and satisfies(n, se, G, m). satisfies is defined below for each form of shape expression :param cntxt: evaluation context - includes graph and schema :param m: list of NodeShape pairs to test :return: Success/failure indicator and, if fail, a list of failure reasons
[ "5", ".", "2", "Validation", "Definition", "<http", ":", "//", "shex", ".", "io", "/", "shex", "-", "semantics", "/", "#validation", ">", "_" ]
python
train
54.875
galactics/beyond
beyond/utils/ccsds.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/utils/ccsds.py#L105-L167
def _read_oem(string): """ Args: string (str): String containing the OEM Return: Ephem: """ ephems = [] required = ('REF_FRAME', 'CENTER_NAME', 'TIME_SYSTEM', 'OBJECT_ID', 'OBJECT_NAME') mode = None for line in string.splitlines(): if not line or line.startswith("COMMENT"): # pragma: no cover continue elif line.startswith("META_START"): mode = "meta" ephem = {'orbits': []} ephems.append(ephem) elif line.startswith("META_STOP"): mode = "data" # Check for required fields for k in required: if k not in ephem: raise ValueError("Missing field '{}'".format(k)) # Conversion to be compliant with beyond.env.jpl dynamic reference # frames naming convention. if ephem['CENTER_NAME'].lower() != "earth": ephem['REF_FRAME'] = ephem['CENTER_NAME'].title().replace(" ", "") elif mode == "meta": key, _, value = line.partition("=") ephem[key.strip()] = value.strip() elif mode == "data": date, *state_vector = line.split() date = Date.strptime(date, "%Y-%m-%dT%H:%M:%S.%f", scale=ephem['TIME_SYSTEM']) # Conversion from km to m, from km/s to m/s # and discard acceleration if present state_vector = np.array([float(x) for x in state_vector[:6]]) * 1000 ephem['orbits'].append(Orbit(date, state_vector, 'cartesian', ephem['REF_FRAME'], None)) for i, ephem_dict in enumerate(ephems): if not ephem_dict['orbits']: raise ValueError("Empty ephemeris") # In case there is no recommendation for interpolation # default to a Lagrange 8th order method = ephem_dict.get('INTERPOLATION', 'Lagrange').lower() order = int(ephem_dict.get('INTERPOLATION_DEGREE', 7)) + 1 ephem = Ephem(ephem_dict['orbits'], method=method, order=order) ephem.name = ephem_dict['OBJECT_NAME'] ephem.cospar_id = ephem_dict['OBJECT_ID'] ephems[i] = ephem if len(ephems) == 1: return ephems[0] return ephems
[ "def", "_read_oem", "(", "string", ")", ":", "ephems", "=", "[", "]", "required", "=", "(", "'REF_FRAME'", ",", "'CENTER_NAME'", ",", "'TIME_SYSTEM'", ",", "'OBJECT_ID'", ",", "'OBJECT_NAME'", ")", "mode", "=", "None", "for", "line", "in", "string", ".", "splitlines", "(", ")", ":", "if", "not", "line", "or", "line", ".", "startswith", "(", "\"COMMENT\"", ")", ":", "# pragma: no cover", "continue", "elif", "line", ".", "startswith", "(", "\"META_START\"", ")", ":", "mode", "=", "\"meta\"", "ephem", "=", "{", "'orbits'", ":", "[", "]", "}", "ephems", ".", "append", "(", "ephem", ")", "elif", "line", ".", "startswith", "(", "\"META_STOP\"", ")", ":", "mode", "=", "\"data\"", "# Check for required fields", "for", "k", "in", "required", ":", "if", "k", "not", "in", "ephem", ":", "raise", "ValueError", "(", "\"Missing field '{}'\"", ".", "format", "(", "k", ")", ")", "# Conversion to be compliant with beyond.env.jpl dynamic reference", "# frames naming convention.", "if", "ephem", "[", "'CENTER_NAME'", "]", ".", "lower", "(", ")", "!=", "\"earth\"", ":", "ephem", "[", "'REF_FRAME'", "]", "=", "ephem", "[", "'CENTER_NAME'", "]", ".", "title", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "elif", "mode", "==", "\"meta\"", ":", "key", ",", "_", ",", "value", "=", "line", ".", "partition", "(", "\"=\"", ")", "ephem", "[", "key", ".", "strip", "(", ")", "]", "=", "value", ".", "strip", "(", ")", "elif", "mode", "==", "\"data\"", ":", "date", ",", "", "*", "state_vector", "=", "line", ".", "split", "(", ")", "date", "=", "Date", ".", "strptime", "(", "date", ",", "\"%Y-%m-%dT%H:%M:%S.%f\"", ",", "scale", "=", "ephem", "[", "'TIME_SYSTEM'", "]", ")", "# Conversion from km to m, from km/s to m/s", "# and discard acceleration if present", "state_vector", "=", "np", ".", "array", "(", "[", "float", "(", "x", ")", "for", "x", "in", "state_vector", "[", ":", "6", "]", "]", ")", "*", "1000", "ephem", "[", "'orbits'", "]", ".", "append", "(", "Orbit", "(", "date", ",", "state_vector", ",", "'cartesian'", ",", "ephem", "[", "'REF_FRAME'", "]", ",", "None", ")", ")", "for", "i", ",", "ephem_dict", "in", "enumerate", "(", "ephems", ")", ":", "if", "not", "ephem_dict", "[", "'orbits'", "]", ":", "raise", "ValueError", "(", "\"Empty ephemeris\"", ")", "# In case there is no recommendation for interpolation", "# default to a Lagrange 8th order", "method", "=", "ephem_dict", ".", "get", "(", "'INTERPOLATION'", ",", "'Lagrange'", ")", ".", "lower", "(", ")", "order", "=", "int", "(", "ephem_dict", ".", "get", "(", "'INTERPOLATION_DEGREE'", ",", "7", ")", ")", "+", "1", "ephem", "=", "Ephem", "(", "ephem_dict", "[", "'orbits'", "]", ",", "method", "=", "method", ",", "order", "=", "order", ")", "ephem", ".", "name", "=", "ephem_dict", "[", "'OBJECT_NAME'", "]", "ephem", ".", "cospar_id", "=", "ephem_dict", "[", "'OBJECT_ID'", "]", "ephems", "[", "i", "]", "=", "ephem", "if", "len", "(", "ephems", ")", "==", "1", ":", "return", "ephems", "[", "0", "]", "return", "ephems" ]
Args: string (str): String containing the OEM Return: Ephem:
[ "Args", ":", "string", "(", "str", ")", ":", "String", "containing", "the", "OEM", "Return", ":", "Ephem", ":" ]
python
train
34.492063
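The META block handling above reduces to a partition-on-'=' loop; a self-contained sketch on an invented fragment:

sample = """META_START
OBJECT_NAME = ISS
CENTER_NAME = EARTH
TIME_SYSTEM = UTC
META_STOP"""

meta = {}
for line in sample.splitlines():
    if not line or line.startswith(('META_START', 'META_STOP')):
        continue
    key, _, value = line.partition('=')
    meta[key.strip()] = value.strip()
print(meta)   # {'OBJECT_NAME': 'ISS', 'CENTER_NAME': 'EARTH', 'TIME_SYSTEM': 'UTC'}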
NASA-AMMOS/AIT-Core
ait/core/bsc.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L229-L238
def socket_monitor_loop(self): ''' Monitor the socket and log captured data. ''' try: while True: gevent.socket.wait_read(self.socket.fileno()) self._handle_log_rotations() self.capture_packet() finally: self.clean_up()
[ "def", "socket_monitor_loop", "(", "self", ")", ":", "try", ":", "while", "True", ":", "gevent", ".", "socket", ".", "wait_read", "(", "self", ".", "socket", ".", "fileno", "(", ")", ")", "self", ".", "_handle_log_rotations", "(", ")", "self", ".", "capture_packet", "(", ")", "finally", ":", "self", ".", "clean_up", "(", ")" ]
Monitor the socket and log captured data.
[ "Monitor", "the", "socket", "and", "log", "captured", "data", "." ]
python
train
30.7
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L705-L710
def setValidityErrorHandler(self, err_func, warn_func, arg=None): """ Register error and warning handlers for RelaxNG validation. These will be called back as f(msg,arg) """ libxml2mod.xmlRelaxNGSetValidErrors(self._o, err_func, warn_func, arg)
[ "def", "setValidityErrorHandler", "(", "self", ",", "err_func", ",", "warn_func", ",", "arg", "=", "None", ")", ":", "libxml2mod", ".", "xmlRelaxNGSetValidErrors", "(", "self", ".", "_o", ",", "err_func", ",", "warn_func", ",", "arg", ")" ]
Register error and warning handlers for RelaxNG validation. These will be called back as f(msg,arg)
[ "Register", "error", "and", "warning", "handlers", "for", "RelaxNG", "validation", ".", "These", "will", "be", "called", "back", "as", "f", "(", "msg", "arg", ")" ]
python
train
46.5
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/natsd/driver.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/natsd/driver.py#L243-L308
def on_response(self, msg): """ setup response if correlation id is the good one """ LOGGER.debug("natsd.Requester.on_response: " + str(sys.getsizeof(msg)) + " bytes received") working_response = json.loads(msg.data.decode()) working_properties = DriverTools.json2properties(working_response['properties']) working_body = b''+bytes(working_response['body'], 'utf8') if 'body' in working_response else None if DriverTools.MSG_CORRELATION_ID in working_properties: if self.corr_id == working_properties[DriverTools.MSG_CORRELATION_ID]: if DriverTools.MSG_SPLIT_COUNT in working_properties and \ int(working_properties[DriverTools.MSG_SPLIT_COUNT]) > 1: working_body_decoded = base64.b64decode(working_body) if working_body is not None else None if self.split_responses is None: self.split_responses = [] self.split_responses_mid = working_properties[DriverTools.MSG_SPLIT_MID] if working_properties[DriverTools.MSG_SPLIT_MID] == self.split_responses_mid: response = { 'properties': working_properties, 'body': working_body_decoded } self.split_responses.insert(int(working_properties[DriverTools.MSG_SPLIT_OID]), response) if self.split_responses.__len__() == int(working_properties[DriverTools.MSG_SPLIT_COUNT]): properties = {} body = b'' for num in range(0, self.split_responses.__len__()): properties.update(self.split_responses[num]['properties']) body += self.split_responses[num]['body'] self.response = { 'properties': properties, 'body': body } self.split_responses = None self.split_responses_mid = None else: LOGGER.warn("natsd.Requester.on_response - discarded response : (" + str(working_properties[DriverTools.MSG_CORRELATION_ID]) + "," + str(working_properties[DriverTools.MSG_SPLIT_MID]) + ")") LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ 'properties': working_properties, 'body': working_body_decoded })) else: working_body_decoded = base64.b64decode(working_body) if working_body is not None else \ bytes(json.dumps({}), 'utf8') self.response = { 'properties': working_properties, 'body': working_body_decoded } else: working_body_decoded = base64.b64decode(working_body) if working_body is not None else None LOGGER.warn("natsd.Requester.on_response - discarded response : " + str(working_properties[DriverTools.MSG_CORRELATION_ID])) LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ 'properties': working_properties, 'body': working_body_decoded })) else: working_body_decoded = base64.b64decode(working_body) if working_body is not None else None LOGGER.warn("natsd.Requester.on_response - discarded response (no correlation ID)") LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({ 'properties': working_properties, 'body': working_body_decoded }))
[ "def", "on_response", "(", "self", ",", "msg", ")", ":", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response: \"", "+", "str", "(", "sys", ".", "getsizeof", "(", "msg", ")", ")", "+", "\" bytes received\"", ")", "working_response", "=", "json", ".", "loads", "(", "msg", ".", "data", ".", "decode", "(", ")", ")", "working_properties", "=", "DriverTools", ".", "json2properties", "(", "working_response", "[", "'properties'", "]", ")", "working_body", "=", "b''", "+", "bytes", "(", "working_response", "[", "'body'", "]", ",", "'utf8'", ")", "if", "'body'", "in", "working_response", "else", "None", "if", "DriverTools", ".", "MSG_CORRELATION_ID", "in", "working_properties", ":", "if", "self", ".", "corr_id", "==", "working_properties", "[", "DriverTools", ".", "MSG_CORRELATION_ID", "]", ":", "if", "DriverTools", ".", "MSG_SPLIT_COUNT", "in", "working_properties", "and", "int", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_COUNT", "]", ")", ">", "1", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "None", "if", "self", ".", "split_responses", "is", "None", ":", "self", ".", "split_responses", "=", "[", "]", "self", ".", "split_responses_mid", "=", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_MID", "]", "if", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_MID", "]", "==", "self", ".", "split_responses_mid", ":", "response", "=", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", "self", ".", "split_responses", ".", "insert", "(", "int", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_OID", "]", ")", ",", "response", ")", "if", "self", ".", "split_responses", ".", "__len__", "(", ")", "==", "int", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_COUNT", "]", ")", ":", "properties", "=", "{", "}", "body", "=", "b''", "for", "num", "in", "range", "(", "0", ",", "self", ".", "split_responses", ".", "__len__", "(", ")", ")", ":", "properties", ".", "update", "(", "self", ".", "split_responses", "[", "num", "]", "[", "'properties'", "]", ")", "body", "+=", "self", ".", "split_responses", "[", "num", "]", "[", "'body'", "]", "self", ".", "response", "=", "{", "'properties'", ":", "properties", ",", "'body'", ":", "body", "}", "self", ".", "split_responses", "=", "None", "self", ".", "split_responses_mid", "=", "None", "else", ":", "LOGGER", ".", "warn", "(", "\"natsd.Requester.on_response - discarded response : (\"", "+", "str", "(", "working_properties", "[", "DriverTools", ".", "MSG_CORRELATION_ID", "]", ")", "+", "\",\"", "+", "str", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_MID", "]", ")", "+", "\")\"", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", ")", ")", "else", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "bytes", "(", "json", ".", "dumps", "(", "{", "}", ")", ",", "'utf8'", ")", "self", ".", "response", "=", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", "else", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "None", "LOGGER", ".", "warn", "(", 
"\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "working_properties", "[", "DriverTools", ".", "MSG_CORRELATION_ID", "]", ")", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", ")", ")", "else", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "None", "LOGGER", ".", "warn", "(", "\"natsd.Requester.on_response - discarded response (no correlation ID)\"", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", ")", ")" ]
setup response if correlation id is the good one
[ "setup", "response", "if", "correlation", "id", "is", "the", "good", "one" ]
python
train
60.363636
codeforamerica/three
three/core.py
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L142-L146
def _format_dates(self, start, end): """Format start and end dates.""" start = self._split_date(start) end = self._split_date(end) return start, end
[ "def", "_format_dates", "(", "self", ",", "start", ",", "end", ")", ":", "start", "=", "self", ".", "_split_date", "(", "start", ")", "end", "=", "self", ".", "_split_date", "(", "end", ")", "return", "start", ",", "end" ]
Format start and end dates.
[ "Format", "start", "and", "end", "dates", "." ]
python
train
35.2
KeplerGO/K2fov
K2fov/greatcircle.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/greatcircle.py#L10-L42
def sphericalAngSep(ra0, dec0, ra1, dec1, radians=False): """ Compute the spherical angular separation between two points on the sky. //Taken from http://www.movable-type.co.uk/scripts/gis-faq-5.1.html NB: For small distances you can probably use sqrt( dDec**2 + cos^2(dec)*dRa) where dDec = dec1 - dec0 and dRa = ra1 - ra0 and dec1 \approx dec \approx dec0 """ if radians==False: ra0 = np.radians(ra0) dec0 = np.radians(dec0) ra1 = np.radians(ra1) dec1 = np.radians(dec1) deltaRa= ra1-ra0 deltaDec= dec1-dec0 val = haversine(deltaDec) val += np.cos(dec0) * np.cos(dec1) * haversine(deltaRa) val = min(1, np.sqrt(val)) ; #Guard against round off error? val = 2*np.arcsin(val) #Convert back to degrees if necessary if radians==False: val = np.degrees(val) return val
[ "def", "sphericalAngSep", "(", "ra0", ",", "dec0", ",", "ra1", ",", "dec1", ",", "radians", "=", "False", ")", ":", "if", "radians", "==", "False", ":", "ra0", "=", "np", ".", "radians", "(", "ra0", ")", "dec0", "=", "np", ".", "radians", "(", "dec0", ")", "ra1", "=", "np", ".", "radians", "(", "ra1", ")", "dec1", "=", "np", ".", "radians", "(", "dec1", ")", "deltaRa", "=", "ra1", "-", "ra0", "deltaDec", "=", "dec1", "-", "dec0", "val", "=", "haversine", "(", "deltaDec", ")", "val", "+=", "np", ".", "cos", "(", "dec0", ")", "*", "np", ".", "cos", "(", "dec1", ")", "*", "haversine", "(", "deltaRa", ")", "val", "=", "min", "(", "1", ",", "np", ".", "sqrt", "(", "val", ")", ")", "#Guard against round off error?", "val", "=", "2", "*", "np", ".", "arcsin", "(", "val", ")", "#Convert back to degrees if necessary", "if", "radians", "==", "False", ":", "val", "=", "np", ".", "degrees", "(", "val", ")", "return", "val" ]
Compute the spherical angular separation between two points on the sky. //Taken from http://www.movable-type.co.uk/scripts/gis-faq-5.1.html NB: For small distances you can probably use sqrt( dDec**2 + cos^2(dec)*dRa) where dDec = dec1 - dec0 and dRa = ra1 - ra0 and dec1 \approx dec \approx dec0
[ "Compute", "the", "spherical", "angular", "separation", "between", "two", "points", "on", "the", "sky", "." ]
python
train
27.515152
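A worked numeric check of the formula above, with haversine(x) = sin²(x/2) written out (degrees in, degrees out):

import numpy as np

def haversine(theta):
    return np.sin(theta / 2.0) ** 2

ra0, dec0, ra1, dec1 = map(np.radians, (10.0, 20.0, 11.0, 20.5))
val = haversine(dec1 - dec0) + np.cos(dec0) * np.cos(dec1) * haversine(ra1 - ra0)
sep = np.degrees(2 * np.arcsin(min(1.0, np.sqrt(val))))
print(round(float(sep), 4))   # ~1.0631 degrees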
mitsei/dlkit
dlkit/json_/assessment_authoring/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L1079-L1114
def delete_assessment_part(self, assessment_part_id):
        """Removes an assessment part and all mapped items.

        arg:    assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart``
        raise:  NotFound - ``assessment_part_id`` not found
        raise:  NullArgument - ``assessment_part_id`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        *compliance: mandatory -- This method must be implemented.*

        """
        # Should be implemented from template for
        # osid.learning.ObjectiveAdminSession.delete_objective_template
        # but need to handle magic part delete ...
        if not isinstance(assessment_part_id, ABCId):
            raise errors.InvalidArgument('the argument is not a valid OSID Id')
        collection = JSONClientValidated('assessment_authoring',
                                         collection='AssessmentPart',
                                         runtime=self._runtime)
        if collection.find({'assessmentPartId': str(assessment_part_id)}).count() != 0:
            raise errors.IllegalState('there are still AssessmentParts associated with this AssessmentPart')
        collection = JSONClientValidated('assessment_authoring',
                                         collection='AssessmentPart',
                                         runtime=self._runtime)
        try:
            apls = get_assessment_part_lookup_session(runtime=self._runtime,
                                                      proxy=self._proxy)
            apls.use_unsequestered_assessment_part_view()
            apls.use_federated_bank_view()
            part = apls.get_assessment_part(assessment_part_id)
            part.delete()
        except AttributeError:
            collection.delete_one({'_id': ObjectId(assessment_part_id.get_identifier())})
[ "def", "delete_assessment_part", "(", "self", ",", "assessment_part_id", ")", ":", "# Should be implemented from template for", "# osid.learning.ObjectiveAdminSession.delete_objective_template", "# but need to handle magic part delete ...", "if", "not", "isinstance", "(", "assessment_part_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the argument is not a valid OSID Id'", ")", "collection", "=", "JSONClientValidated", "(", "'assessment_authoring'", ",", "collection", "=", "'AssessmentPart'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "collection", ".", "find", "(", "{", "'assessmentPartId'", ":", "str", "(", "assessment_part_id", ")", "}", ")", ".", "count", "(", ")", "!=", "0", ":", "raise", "errors", ".", "IllegalState", "(", "'there are still AssessmentParts associated with this AssessmentPart'", ")", "collection", "=", "JSONClientValidated", "(", "'assessment_authoring'", ",", "collection", "=", "'AssessmentPart'", ",", "runtime", "=", "self", ".", "_runtime", ")", "try", ":", "apls", "=", "get_assessment_part_lookup_session", "(", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "apls", ".", "use_unsequestered_assessment_part_view", "(", ")", "apls", ".", "use_federated_bank_view", "(", ")", "part", "=", "apls", ".", "get_assessment_part", "(", "assessment_part_id", ")", "part", ".", "delete", "(", ")", "except", "AttributeError", ":", "collection", ".", "delete_one", "(", "{", "'_id'", ":", "ObjectId", "(", "assessment_part_id", ".", "get_identifier", "(", ")", ")", "}", ")" ]
Removes an assessment part and all mapped items.

arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart``
raise: NotFound - ``assessment_part_id`` not found
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
[ "Removes", "an", "asessment", "part", "and", "all", "mapped", "items", "." ]
python
train
52.583333
ska-sa/katcp-python
katcp/resource.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource.py#L824-L828
def set_value(self, value, status=Sensor.NOMINAL, timestamp=None):
        """Set sensor value with optional specification of status and timestamp"""
        if timestamp is None:
            timestamp = self._manager.time()
        self.set(timestamp, status, value)
[ "def", "set_value", "(", "self", ",", "value", ",", "status", "=", "Sensor", ".", "NOMINAL", ",", "timestamp", "=", "None", ")", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "self", ".", "_manager", ".", "time", "(", ")", "self", ".", "set", "(", "timestamp", ",", "status", ",", "value", ")" ]
Set sensor value with optional specification of status and timestamp
[ "Set", "sensor", "value", "with", "optinal", "specification", "of", "status", "and", "timestamp" ]
python
train
52.4
ianmiell/shutit
shutit_setup.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_setup.py#L170-L184
def build(self, shutit): """Initializes target ready for build and updating package management if in container. """ if shutit.build['delivery'] in ('docker','dockerfile'): if shutit.get_current_shutit_pexpect_session_environment().install_type == 'apt': shutit.add_to_bashrc('export DEBIAN_FRONTEND=noninteractive') if not shutit.command_available('lsb_release'): shutit.install('lsb-release') shutit.lsb_release() elif shutit.get_current_shutit_pexpect_session_environment().install_type == 'yum': # yum updates are so often "bad" that we let exit codes of 1 through. # TODO: make this more sophisticated shutit.send('yum update -y', timeout=9999, exit_values=['0', '1']) shutit.pause_point('Anything you want to do to the target host ' + 'before the build starts?', level=2) return True
[ "def", "build", "(", "self", ",", "shutit", ")", ":", "if", "shutit", ".", "build", "[", "'delivery'", "]", "in", "(", "'docker'", ",", "'dockerfile'", ")", ":", "if", "shutit", ".", "get_current_shutit_pexpect_session_environment", "(", ")", ".", "install_type", "==", "'apt'", ":", "shutit", ".", "add_to_bashrc", "(", "'export DEBIAN_FRONTEND=noninteractive'", ")", "if", "not", "shutit", ".", "command_available", "(", "'lsb_release'", ")", ":", "shutit", ".", "install", "(", "'lsb-release'", ")", "shutit", ".", "lsb_release", "(", ")", "elif", "shutit", ".", "get_current_shutit_pexpect_session_environment", "(", ")", ".", "install_type", "==", "'yum'", ":", "# yum updates are so often \"bad\" that we let exit codes of 1 through.", "# TODO: make this more sophisticated", "shutit", ".", "send", "(", "'yum update -y'", ",", "timeout", "=", "9999", ",", "exit_values", "=", "[", "'0'", ",", "'1'", "]", ")", "shutit", ".", "pause_point", "(", "'Anything you want to do to the target host '", "+", "'before the build starts?'", ",", "level", "=", "2", ")", "return", "True" ]
Initializes target ready for build and updating package management if in container.
[ "Initializes", "target", "ready", "for", "build", "and", "updating", "package", "management", "if", "in", "container", "." ]
python
train
54.666667
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/outcomes.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/outcomes.py#L225-L232
def remove_core_element(self, model): """Remove respective core element of handed outcome model :param OutcomeModel model: Outcome model which core element should be removed :return: """ assert model.outcome.parent is self.model.state gui_helper_state_machine.delete_core_element_of_model(model)
[ "def", "remove_core_element", "(", "self", ",", "model", ")", ":", "assert", "model", ".", "outcome", ".", "parent", "is", "self", ".", "model", ".", "state", "gui_helper_state_machine", ".", "delete_core_element_of_model", "(", "model", ")" ]
Remove respective core element of handed outcome model :param OutcomeModel model: Outcome model which core element should be removed :return:
[ "Remove", "respective", "core", "element", "of", "handed", "outcome", "model" ]
python
train
42.125
bitlabstudio/django-dashboard-app
dashboard_app/widget_pool.py
https://github.com/bitlabstudio/django-dashboard-app/blob/ed98f2bca91a4ced36d0dd1aa1baee78e989cf64/dashboard_app/widget_pool.py#L69-L91
def register_widget(self, widget_cls, **widget_kwargs): """ Registers the given widget. Widgets must inherit ``DashboardWidgetBase`` and you cannot register the same widget twice. :widget_cls: A class that inherits ``DashboardWidgetBase``. """ if not issubclass(widget_cls, DashboardWidgetBase): raise ImproperlyConfigured( 'DashboardWidgets must be subclasses of DashboardWidgetBase,' ' {0} is not.'.format(widget_cls)) widget = widget_cls(**widget_kwargs) widget_name = widget.get_name() if widget_name in self.widgets: raise WidgetAlreadyRegistered( 'Cannot register {0}, a plugin with this name {1} is already ' 'registered.'.format(widget_cls, widget_name)) self.widgets[widget_name] = widget
[ "def", "register_widget", "(", "self", ",", "widget_cls", ",", "*", "*", "widget_kwargs", ")", ":", "if", "not", "issubclass", "(", "widget_cls", ",", "DashboardWidgetBase", ")", ":", "raise", "ImproperlyConfigured", "(", "'DashboardWidgets must be subclasses of DashboardWidgetBase,'", "' {0} is not.'", ".", "format", "(", "widget_cls", ")", ")", "widget", "=", "widget_cls", "(", "*", "*", "widget_kwargs", ")", "widget_name", "=", "widget", ".", "get_name", "(", ")", "if", "widget_name", "in", "self", ".", "widgets", ":", "raise", "WidgetAlreadyRegistered", "(", "'Cannot register {0}, a plugin with this name {1} is already '", "'registered.'", ".", "format", "(", "widget_cls", ",", "widget_name", ")", ")", "self", ".", "widgets", "[", "widget_name", "]", "=", "widget" ]
Registers the given widget. Widgets must inherit ``DashboardWidgetBase`` and you cannot register the same widget twice. :widget_cls: A class that inherits ``DashboardWidgetBase``.
[ "Registers", "the", "given", "widget", "." ]
python
test
37.217391
draperjames/qtpandas
qtpandas/utils.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/utils.py#L208-L218
def dedupe_cols(frame): """ Need to dedupe columns that have the same name. """ cols = list(frame.columns) for i, item in enumerate(frame.columns): if item in frame.columns[:i]: cols[i] = "toDROP" frame.columns = cols return frame.drop("toDROP", 1, errors='ignore')
[ "def", "dedupe_cols", "(", "frame", ")", ":", "cols", "=", "list", "(", "frame", ".", "columns", ")", "for", "i", ",", "item", "in", "enumerate", "(", "frame", ".", "columns", ")", ":", "if", "item", "in", "frame", ".", "columns", "[", ":", "i", "]", ":", "cols", "[", "i", "]", "=", "\"toDROP\"", "frame", ".", "columns", "=", "cols", "return", "frame", ".", "drop", "(", "\"toDROP\"", ",", "1", ",", "errors", "=", "'ignore'", ")" ]
Need to dedupe columns that have the same name.
[ "Need", "to", "dedupe", "columns", "that", "have", "the", "same", "name", "." ]
python
train
27.636364
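Pandas ships an equivalent keep-first idiom that avoids the sentinel column name used above:

import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'a'])
df = df.loc[:, ~df.columns.duplicated()]   # keeps the first of each duplicated name
print(list(df.columns))                    # ['a', 'b']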
manahl/arctic
arctic/chunkstore/chunkstore.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/chunkstore/chunkstore.py#L119-L168
def delete(self, symbol, chunk_range=None, audit=None): """ Delete all chunks for a symbol, or optionally, chunks within a range Parameters ---------- symbol : str symbol name for the item chunk_range: range object a date range to delete audit: dict dict to store in the audit log """ if chunk_range is not None: sym = self._get_symbol_info(symbol) # read out chunks that fall within the range and filter out # data within the range df = self.read(symbol, chunk_range=chunk_range, filter_data=False) row_adjust = len(df) if not df.empty: df = CHUNKER_MAP[sym[CHUNKER]].exclude(df, chunk_range) # remove chunks, and update any remaining data query = {SYMBOL: symbol} query.update(CHUNKER_MAP[sym[CHUNKER]].to_mongo(chunk_range)) self._collection.delete_many(query) self._mdata.delete_many(query) self.update(symbol, df) # update symbol metadata (rows and chunk count) sym = self._get_symbol_info(symbol) sym[LEN] -= row_adjust sym[CHUNK_COUNT] = mongo_count(self._collection, filter={SYMBOL: symbol}) self._symbols.replace_one({SYMBOL: symbol}, sym) else: query = {SYMBOL: symbol} self._collection.delete_many(query) self._symbols.delete_many(query) self._mdata.delete_many(query) if audit is not None: audit['symbol'] = symbol if chunk_range is not None: audit['rows_deleted'] = row_adjust audit['action'] = 'range delete' else: audit['action'] = 'symbol delete' self._audit.insert_one(audit)
[ "def", "delete", "(", "self", ",", "symbol", ",", "chunk_range", "=", "None", ",", "audit", "=", "None", ")", ":", "if", "chunk_range", "is", "not", "None", ":", "sym", "=", "self", ".", "_get_symbol_info", "(", "symbol", ")", "# read out chunks that fall within the range and filter out", "# data within the range", "df", "=", "self", ".", "read", "(", "symbol", ",", "chunk_range", "=", "chunk_range", ",", "filter_data", "=", "False", ")", "row_adjust", "=", "len", "(", "df", ")", "if", "not", "df", ".", "empty", ":", "df", "=", "CHUNKER_MAP", "[", "sym", "[", "CHUNKER", "]", "]", ".", "exclude", "(", "df", ",", "chunk_range", ")", "# remove chunks, and update any remaining data", "query", "=", "{", "SYMBOL", ":", "symbol", "}", "query", ".", "update", "(", "CHUNKER_MAP", "[", "sym", "[", "CHUNKER", "]", "]", ".", "to_mongo", "(", "chunk_range", ")", ")", "self", ".", "_collection", ".", "delete_many", "(", "query", ")", "self", ".", "_mdata", ".", "delete_many", "(", "query", ")", "self", ".", "update", "(", "symbol", ",", "df", ")", "# update symbol metadata (rows and chunk count)", "sym", "=", "self", ".", "_get_symbol_info", "(", "symbol", ")", "sym", "[", "LEN", "]", "-=", "row_adjust", "sym", "[", "CHUNK_COUNT", "]", "=", "mongo_count", "(", "self", ".", "_collection", ",", "filter", "=", "{", "SYMBOL", ":", "symbol", "}", ")", "self", ".", "_symbols", ".", "replace_one", "(", "{", "SYMBOL", ":", "symbol", "}", ",", "sym", ")", "else", ":", "query", "=", "{", "SYMBOL", ":", "symbol", "}", "self", ".", "_collection", ".", "delete_many", "(", "query", ")", "self", ".", "_symbols", ".", "delete_many", "(", "query", ")", "self", ".", "_mdata", ".", "delete_many", "(", "query", ")", "if", "audit", "is", "not", "None", ":", "audit", "[", "'symbol'", "]", "=", "symbol", "if", "chunk_range", "is", "not", "None", ":", "audit", "[", "'rows_deleted'", "]", "=", "row_adjust", "audit", "[", "'action'", "]", "=", "'range delete'", "else", ":", "audit", "[", "'action'", "]", "=", "'symbol delete'", "self", ".", "_audit", ".", "insert_one", "(", "audit", ")" ]
Delete all chunks for a symbol, or optionally, chunks within a range Parameters ---------- symbol : str symbol name for the item chunk_range: range object a date range to delete audit: dict dict to store in the audit log
[ "Delete", "all", "chunks", "for", "a", "symbol", "or", "optionally", "chunks", "within", "a", "range" ]
python
train
37.62
CxAalto/gtfspy
gtfspy/routing/profile_block_analyzer.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/profile_block_analyzer.py#L185-L213
def _temporal_distance_pdf(self): """ Temporal distance probability density function. Returns ------- non_delta_peak_split_points: numpy.array non_delta_peak_densities: numpy.array len(density) == len(temporal_distance_split_points_ordered) -1 delta_peak_loc_to_probability_mass : dict """ temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf() delta_peak_loc_to_probability_mass = {} non_delta_peak_split_points = [temporal_distance_split_points_ordered[0]] non_delta_peak_densities = [] for i in range(0, len(temporal_distance_split_points_ordered) - 1): left = temporal_distance_split_points_ordered[i] right = temporal_distance_split_points_ordered[i + 1] width = right - left prob_mass = norm_cdf[i + 1] - norm_cdf[i] if width == 0.0: delta_peak_loc_to_probability_mass[left] = prob_mass else: non_delta_peak_split_points.append(right) non_delta_peak_densities.append(prob_mass / float(width)) assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - 1) return numpy.array(non_delta_peak_split_points), \ numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass
[ "def", "_temporal_distance_pdf", "(", "self", ")", ":", "temporal_distance_split_points_ordered", ",", "norm_cdf", "=", "self", ".", "_temporal_distance_cdf", "(", ")", "delta_peak_loc_to_probability_mass", "=", "{", "}", "non_delta_peak_split_points", "=", "[", "temporal_distance_split_points_ordered", "[", "0", "]", "]", "non_delta_peak_densities", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "temporal_distance_split_points_ordered", ")", "-", "1", ")", ":", "left", "=", "temporal_distance_split_points_ordered", "[", "i", "]", "right", "=", "temporal_distance_split_points_ordered", "[", "i", "+", "1", "]", "width", "=", "right", "-", "left", "prob_mass", "=", "norm_cdf", "[", "i", "+", "1", "]", "-", "norm_cdf", "[", "i", "]", "if", "width", "==", "0.0", ":", "delta_peak_loc_to_probability_mass", "[", "left", "]", "=", "prob_mass", "else", ":", "non_delta_peak_split_points", ".", "append", "(", "right", ")", "non_delta_peak_densities", ".", "append", "(", "prob_mass", "/", "float", "(", "width", ")", ")", "assert", "(", "len", "(", "non_delta_peak_densities", ")", "==", "len", "(", "non_delta_peak_split_points", ")", "-", "1", ")", "return", "numpy", ".", "array", "(", "non_delta_peak_split_points", ")", ",", "numpy", ".", "array", "(", "non_delta_peak_densities", ")", ",", "delta_peak_loc_to_probability_mass" ]
Temporal distance probability density function. Returns ------- non_delta_peak_split_points: numpy.array non_delta_peak_densities: numpy.array len(density) == len(temporal_distance_split_points_ordered) -1 delta_peak_loc_to_probability_mass : dict
[ "Temporal", "distance", "probability", "density", "function", "." ]
python
valid
47.206897
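The method above converts a CDF over ordered split points into piecewise-constant densities plus delta peaks at zero-width intervals. Below is a self-contained toy sketch of that same logic; the numbers are invented for illustration and are not taken from gtfspy.

```python
import numpy as np

# Toy CDF over ordered split points; the repeated point 60 carries a
# probability mass of 0.25 and becomes a delta peak, mirroring the logic above.
split_points = np.array([0.0, 30.0, 60.0, 60.0, 120.0])
cdf = np.array([0.0, 0.25, 0.50, 0.75, 1.0])

pdf_points = [split_points[0]]
densities = []
delta_peaks = {}
for i in range(len(split_points) - 1):
    left, right = split_points[i], split_points[i + 1]
    mass = cdf[i + 1] - cdf[i]
    if right == left:                     # zero-width interval -> delta peak
        delta_peaks[left] = delta_peaks.get(left, 0.0) + mass
    else:
        pdf_points.append(right)
        densities.append(mass / (right - left))

print(pdf_points)    # [0.0, 30.0, 60.0, 120.0]
print(densities)     # [0.25/30, 0.25/30, 0.25/60]
print(delta_peaks)   # {60.0: 0.25}
```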
andela-sjames/paystack-python
paystackapi/base.py
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/base.py#L45-L60
def _request(self, method, resource_uri, **kwargs): """Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response """ data = kwargs.get('data') response = method(self.API_BASE_URL + resource_uri, json=data, headers=self.headers) response.raise_for_status() return response.json()
[ "def", "_request", "(", "self", ",", "method", ",", "resource_uri", ",", "*", "*", "kwargs", ")", ":", "data", "=", "kwargs", ".", "get", "(", "'data'", ")", "response", "=", "method", "(", "self", ".", "API_BASE_URL", "+", "resource_uri", ",", "json", "=", "data", ",", "headers", "=", "self", ".", "headers", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response
[ "Perform", "a", "method", "on", "a", "resource", "." ]
python
train
30.5625
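A self-contained sketch of the request pattern in the record above: one `_request` helper that takes a `requests` method, joins the base URL, sends JSON and raises on HTTP errors. The base URL, header contents and endpoint are placeholders, not values from the paystack record.

```python
# Minimal sketch of the pattern above; BASE_URL and headers are placeholders.
import requests

class MiniClient:
    API_BASE_URL = 'https://api.example.com/'        # placeholder base URL
    headers = {'Authorization': 'Bearer <secret-key>'}

    def _request(self, method, resource_uri, **kwargs):
        data = kwargs.get('data')
        response = method(self.API_BASE_URL + resource_uri,
                          json=data, headers=self.headers)
        response.raise_for_status()   # raises requests.HTTPError on 4xx/5xx
        return response.json()

# client = MiniClient()
# client._request(requests.post, 'transaction/initialize',
#                 data={'amount': 5000, 'email': '[email protected]'})
```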
MisterY/gnucash-portfolio
reports/report_vanguard_prices/report_vanguard_prices.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/reports/report_vanguard_prices/report_vanguard_prices.py#L18-L27
def generate_report( book_url, fund_ids: StringOption( section="Funds", sort_tag="c", documentation_string="Comma-separated list of fund ids.", default_value="8123,8146,8148,8147") ): """Generates the report output""" return render_report(book_url, fund_ids)
[ "def", "generate_report", "(", "book_url", ",", "fund_ids", ":", "StringOption", "(", "section", "=", "\"Funds\"", ",", "sort_tag", "=", "\"c\"", ",", "documentation_string", "=", "\"Comma-separated list of fund ids.\"", ",", "default_value", "=", "\"8123,8146,8148,8147\"", ")", ")", ":", "return", "render_report", "(", "book_url", ",", "fund_ids", ")" ]
Generates the report output
[ "Generates", "the", "report", "output" ]
python
train
32.5
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2894-L2906
def MediaBoxSize(self): """Retrieve width, height of /MediaBox.""" CheckParent(self) val = _fitz.Page_MediaBoxSize(self) val = Point(val) if not bool(val): r = self.rect val = Point(r.width, r.height) return val
[ "def", "MediaBoxSize", "(", "self", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_MediaBoxSize", "(", "self", ")", "val", "=", "Point", "(", "val", ")", "if", "not", "bool", "(", "val", ")", ":", "r", "=", "self", ".", "rect", "val", "=", "Point", "(", "r", ".", "width", ",", "r", ".", "height", ")", "return", "val" ]
Retrieve width, height of /MediaBox.
[ "Retrieve", "width", "height", "of", "/", "MediaBox", "." ]
python
train
21.153846
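A hedged usage sketch for `MediaBoxSize`, which PyMuPDF exposes as a page property; the filename is a placeholder.

```python
# Usage sketch -- assumes PyMuPDF is installed and a PDF exists at this path.
import fitz

doc = fitz.open("example.pdf")    # placeholder filename
page = doc[0]
size = page.MediaBoxSize          # Point(width, height); falls back to page.rect
print(size.x, size.y)             # dimensions in points
```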
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L365-L369
def close(self): """Send CLOSE command to device.""" close_command = StandardSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00) self._send_method(close_command, self._close_message_received)
[ "def", "close", "(", "self", ")", ":", "close_command", "=", "StandardSend", "(", "self", ".", "_address", ",", "COMMAND_LIGHT_OFF_0X13_0X00", ")", "self", ".", "_send_method", "(", "close_command", ",", "self", ".", "_close_message_received", ")" ]
Send CLOSE command to device.
[ "Send", "CLOSE", "command", "to", "device", "." ]
python
train
49
apple/turicreate
deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/libs/metaparse/tools/benchmark/generate.py#L233-L235
def out_filename(template, n_val, mode): """Determine the output filename""" return '{0}_{1}_{2}.cpp'.format(template.name, n_val, mode.identifier)
[ "def", "out_filename", "(", "template", ",", "n_val", ",", "mode", ")", ":", "return", "'{0}_{1}_{2}.cpp'", ".", "format", "(", "template", ".", "name", ",", "n_val", ",", "mode", ".", "identifier", ")" ]
Determine the output filename
[ "Determine", "the", "output", "filename" ]
python
train
51
pycontribs/jira
jira/resources.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/resources.py#L251-L347
def update(self, fields=None, async_=None, jira=None, notify=True, **kwargs): """Update this resource on the server. Keyword arguments are marshalled into a dict before being sent. If this resource doesn't support ``PUT``, a :py:exc:`.JIRAError` will be raised; subclasses that specialize this method will only raise errors in case of user error. :param fields: Fields which should be updated for the object. :type fields: Optional[Dict[str, Any]] :param async_: If true the request will be added to the queue so it can be executed later using async_run() :type async_: bool :param jira: Instance of JIRA Client :type jira: jira.JIRA :param notify: Whether or not to notify users about the update. (Default: True) :type notify: bool :type kwargs: **Any """ if async_ is None: async_ = self._options['async'] data = {} if fields is not None: data.update(fields) data.update(kwargs) data = json.dumps(data) if not notify: querystring = "?notifyUsers=false" else: querystring = "" r = self._session.put( self.self + querystring, data=data) if 'autofix' in self._options and \ r.status_code == 400: user = None error_list = get_error_list(r) logging.error(error_list) if "The reporter specified is not a user." in error_list: if 'reporter' not in data['fields']: logging.warning( "autofix: setting reporter to '%s' and retrying the update." % self._options['autofix']) data['fields']['reporter'] = { 'name': self._options['autofix']} if "Issues must be assigned." in error_list: if 'assignee' not in data['fields']: logging.warning("autofix: setting assignee to '%s' for %s and retrying the update." % ( self._options['autofix'], self.key)) data['fields']['assignee'] = { 'name': self._options['autofix']} # for some reason the above approach fails on Jira 5.2.11 # so we need to change the assignee before if "Issue type is a sub-task but parent issue key or id not specified." in error_list: logging.warning( "autofix: trying to fix sub-task without parent by converting to it to bug") data['fields']['issuetype'] = {"name": "Bug"} if "The summary is invalid because it contains newline characters." in error_list: logging.warning("autofix: trying to fix newline in summary") data['fields'][ 'summary'] = self.fields.summary.replace("/n", "") for error in error_list: if re.search(r"^User '(.*)' was not found in the system\.", error, re.U): m = re.search( r"^User '(.*)' was not found in the system\.", error, re.U) if m: user = m.groups()[0] else: raise NotImplementedError() if re.search(r"^User '(.*)' does not exist\.", error): m = re.search(r"^User '(.*)' does not exist\.", error) if m: user = m.groups()[0] else: raise NotImplementedError() if user: logging.warning( "Trying to add missing orphan user '%s' in order to complete the previous failed operation." % user) jira.add_user(user, '[email protected]', 10100, active=False) # if 'assignee' not in data['fields']: # logging.warning("autofix: setting assignee to '%s' and retrying the update." % self._options['autofix']) # data['fields']['assignee'] = {'name': self._options['autofix']} # EXPERIMENTAL ---> if async_: if not hasattr(self._session, '_async_jobs'): self._session._async_jobs = set() self._session._async_jobs.add(threaded_requests.put( self.self, data=json.dumps(data))) else: r = self._session.put( self.self, data=json.dumps(data)) time.sleep(self._options['delay_reload']) self._load(self.self)
[ "def", "update", "(", "self", ",", "fields", "=", "None", ",", "async_", "=", "None", ",", "jira", "=", "None", ",", "notify", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "async_", "is", "None", ":", "async_", "=", "self", ".", "_options", "[", "'async'", "]", "data", "=", "{", "}", "if", "fields", "is", "not", "None", ":", "data", ".", "update", "(", "fields", ")", "data", ".", "update", "(", "kwargs", ")", "data", "=", "json", ".", "dumps", "(", "data", ")", "if", "not", "notify", ":", "querystring", "=", "\"?notifyUsers=false\"", "else", ":", "querystring", "=", "\"\"", "r", "=", "self", ".", "_session", ".", "put", "(", "self", ".", "self", "+", "querystring", ",", "data", "=", "data", ")", "if", "'autofix'", "in", "self", ".", "_options", "and", "r", ".", "status_code", "==", "400", ":", "user", "=", "None", "error_list", "=", "get_error_list", "(", "r", ")", "logging", ".", "error", "(", "error_list", ")", "if", "\"The reporter specified is not a user.\"", "in", "error_list", ":", "if", "'reporter'", "not", "in", "data", "[", "'fields'", "]", ":", "logging", ".", "warning", "(", "\"autofix: setting reporter to '%s' and retrying the update.\"", "%", "self", ".", "_options", "[", "'autofix'", "]", ")", "data", "[", "'fields'", "]", "[", "'reporter'", "]", "=", "{", "'name'", ":", "self", ".", "_options", "[", "'autofix'", "]", "}", "if", "\"Issues must be assigned.\"", "in", "error_list", ":", "if", "'assignee'", "not", "in", "data", "[", "'fields'", "]", ":", "logging", ".", "warning", "(", "\"autofix: setting assignee to '%s' for %s and retrying the update.\"", "%", "(", "self", ".", "_options", "[", "'autofix'", "]", ",", "self", ".", "key", ")", ")", "data", "[", "'fields'", "]", "[", "'assignee'", "]", "=", "{", "'name'", ":", "self", ".", "_options", "[", "'autofix'", "]", "}", "# for some reason the above approach fails on Jira 5.2.11", "# so we need to change the assignee before", "if", "\"Issue type is a sub-task but parent issue key or id not specified.\"", "in", "error_list", ":", "logging", ".", "warning", "(", "\"autofix: trying to fix sub-task without parent by converting to it to bug\"", ")", "data", "[", "'fields'", "]", "[", "'issuetype'", "]", "=", "{", "\"name\"", ":", "\"Bug\"", "}", "if", "\"The summary is invalid because it contains newline characters.\"", "in", "error_list", ":", "logging", ".", "warning", "(", "\"autofix: trying to fix newline in summary\"", ")", "data", "[", "'fields'", "]", "[", "'summary'", "]", "=", "self", ".", "fields", ".", "summary", ".", "replace", "(", "\"/n\"", ",", "\"\"", ")", "for", "error", "in", "error_list", ":", "if", "re", ".", "search", "(", "r\"^User '(.*)' was not found in the system\\.\"", ",", "error", ",", "re", ".", "U", ")", ":", "m", "=", "re", ".", "search", "(", "r\"^User '(.*)' was not found in the system\\.\"", ",", "error", ",", "re", ".", "U", ")", "if", "m", ":", "user", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "else", ":", "raise", "NotImplementedError", "(", ")", "if", "re", ".", "search", "(", "r\"^User '(.*)' does not exist\\.\"", ",", "error", ")", ":", "m", "=", "re", ".", "search", "(", "r\"^User '(.*)' does not exist\\.\"", ",", "error", ")", "if", "m", ":", "user", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "else", ":", "raise", "NotImplementedError", "(", ")", "if", "user", ":", "logging", ".", "warning", "(", "\"Trying to add missing orphan user '%s' in order to complete the previous failed operation.\"", "%", "user", ")", "jira", ".", "add_user", "(", "user", ",", 
"'[email protected]'", ",", "10100", ",", "active", "=", "False", ")", "# if 'assignee' not in data['fields']:", "# logging.warning(\"autofix: setting assignee to '%s' and retrying the update.\" % self._options['autofix'])", "# data['fields']['assignee'] = {'name': self._options['autofix']}", "# EXPERIMENTAL --->", "if", "async_", ":", "if", "not", "hasattr", "(", "self", ".", "_session", ",", "'_async_jobs'", ")", ":", "self", ".", "_session", ".", "_async_jobs", "=", "set", "(", ")", "self", ".", "_session", ".", "_async_jobs", ".", "add", "(", "threaded_requests", ".", "put", "(", "self", ".", "self", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", ")", "else", ":", "r", "=", "self", ".", "_session", ".", "put", "(", "self", ".", "self", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "time", ".", "sleep", "(", "self", ".", "_options", "[", "'delay_reload'", "]", ")", "self", ".", "_load", "(", "self", ".", "self", ")" ]
Update this resource on the server. Keyword arguments are marshalled into a dict before being sent. If this resource doesn't support ``PUT``, a :py:exc:`.JIRAError` will be raised; subclasses that specialize this method will only raise errors in case of user error. :param fields: Fields which should be updated for the object. :type fields: Optional[Dict[str, Any]] :param async_: If true the request will be added to the queue so it can be executed later using async_run() :type async_: bool :param jira: Instance of JIRA Client :type jira: jira.JIRA :param notify: Whether or not to notify users about the update. (Default: True) :type notify: bool :type kwargs: **Any
[ "Update", "this", "resource", "on", "the", "server", "." ]
python
train
46.876289
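A hedged usage sketch for the `update()` method above, reached through the public jira client; the server URL, credentials and issue key are placeholders.

```python
# Usage sketch -- assumes an authenticated jira.JIRA client.
from jira import JIRA

client = JIRA(server="https://jira.example.com",       # placeholder server
              basic_auth=("user", "api-token"))         # placeholder creds
issue = client.issue("PROJ-123")                        # placeholder key

# Update two fields in one PUT, without notifying watchers
# (notify=False appends ?notifyUsers=false, as in the code above).
issue.update(fields={"summary": "Clearer summary",
                     "description": "Expanded description"},
             notify=False)
```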
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L321-L332
def sync(self, old_token=None):
        """Get the current sync token and changed items for synchronization.

        ``old_token`` is an old sync token which is used as the base of the
        delta update. If the sync token is missing, all items are returned.

        ValueError is raised for invalid or old tokens.
        """
        # FIXME: Actually implement
        token = "http://radicale.org/ns/sync/%s" % self.etag.strip("\"")
        if old_token:
            raise ValueError("Sync tokens are not supported (you can ignore this warning)")
        return token, self.list()
[ "def", "sync", "(", "self", ",", "old_token", "=", "None", ")", ":", "# FIXME: Actually implement", "token", "=", "\"http://radicale.org/ns/sync/%s\"", "%", "self", ".", "etag", ".", "strip", "(", "\"\\\"\"", ")", "if", "old_token", ":", "raise", "ValueError", "(", "\"Sync tokens are not supported (you can ignore this warning)\"", ")", "return", "token", ",", "self", ".", "list", "(", ")" ]
Get the current sync token and changed items for synchronization.

``old_token`` is an old sync token which is used as the base of the delta update. If the sync token is missing, all items are returned.

ValueError is raised for invalid or old tokens.
[ "Get", "the", "current", "sync", "token", "and", "changed", "items", "for", "synchronization", "." ]
python
train
47.25
erijo/tellcore-py
tellcore/telldus.py
https://github.com/erijo/tellcore-py/blob/7a1eb53e12ef039a2350933e502633df7560f6a8/tellcore/telldus.py#L48-L62
def process_callback(self, block=True): """Dispatch a single callback in the current thread. :param boolean block: If True, blocks waiting for a callback to come. :return: True if a callback was processed; otherwise False. """ try: (callback, args) = self._queue.get(block=block) try: callback(*args) finally: self._queue.task_done() except queue.Empty: return False return True
[ "def", "process_callback", "(", "self", ",", "block", "=", "True", ")", ":", "try", ":", "(", "callback", ",", "args", ")", "=", "self", ".", "_queue", ".", "get", "(", "block", "=", "block", ")", "try", ":", "callback", "(", "*", "args", ")", "finally", ":", "self", ".", "_queue", ".", "task_done", "(", ")", "except", "queue", ".", "Empty", ":", "return", "False", "return", "True" ]
Dispatch a single callback in the current thread. :param boolean block: If True, blocks waiting for a callback to come. :return: True if a callback was processed; otherwise False.
[ "Dispatch", "a", "single", "callback", "in", "the", "current", "thread", "." ]
python
train
33.4
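A sketch of the dispatch loop `process_callback` is designed for, assuming the tellcore package and a running Telldus daemon; the lambda is purely illustrative.

```python
# Sketch -- assumes tellcore is installed and telldusd is running locally.
from tellcore.telldus import TelldusCore, QueuedCallbackDispatcher

dispatcher = QueuedCallbackDispatcher()
core = TelldusCore(callback_dispatcher=dispatcher)

core.register_device_event(lambda *args: print("device event:", args))

# Drain queued callbacks without blocking; process_callback() returns False
# once the queue is empty.
while dispatcher.process_callback(block=False):
    pass
```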
aetros/aetros-cli
aetros/git.py
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/git.py#L582-L651
def stream_file(self, path, fast_lane=True): """ Create a temp file, stream it to the server if online and append its content using the write() method. This makes sure that we have all newest data of this file on the server directly. At the end of the job, the content the server received is stored as git blob on the server. It is then committed locally and pushed. Git detects that the server already has the version (through the continuous streaming) and won't push it again. Very handy for rather large files that will append over time (like channel data, logs) Example: self.log_stream = git.stream_file('log.txt') self.log_stream.write("new line\n"); self.log_stream.write("another line\n"); """ # create temp file # open temp file # register stream file and write locally # on end() git_commit that file locally # create socket connection to server # stream file to server # on end() send server end signal, so he can store its content in git as blob as well. # A git push would detect that both sides have the same content already, # except when server connection broke between start() and end(). # Result -> already transmitted logs/channel data (probably many MBs) won't transfered twice # when doing a git-push. # return handler to write to this file full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path) if not os.path.exists(os.path.dirname(full_path)): os.makedirs(os.path.dirname(full_path)) handle = open(full_path, 'wb') self.streamed_files[path] = handle class Stream(): def __init__(self, git): self.git = git def write(self, data): if path not in self.git.streamed_files: # already committed to server return if hasattr(data, 'encode'): data = data.encode("utf-8", 'replace') try: self.git.stream_files_lock.acquire() if not handle.closed: handle.write(data) handle.flush() except IOError as e: handle.close() if 'No space left' in e.__str__(): sys.stderr.write(traceback.format_exc() + '\n') self.git.logger.error(e.__str__()) finally: self.git.stream_files_lock.release() if self.git.client.online is not False: self.git.client.send({'type': 'stream-blob', 'path': path, 'data': data}, channel='' if fast_lane else 'files') return Stream(self)
[ "def", "stream_file", "(", "self", ",", "path", ",", "fast_lane", "=", "True", ")", ":", "# create temp file", "# open temp file", "# register stream file and write locally", "# on end() git_commit that file locally", "# create socket connection to server", "# stream file to server", "# on end() send server end signal, so he can store its content in git as blob as well.", "# A git push would detect that both sides have the same content already,", "# except when server connection broke between start() and end().", "# Result -> already transmitted logs/channel data (probably many MBs) won't transfered twice", "# when doing a git-push.", "# return handler to write to this file", "full_path", "=", "os", ".", "path", ".", "normpath", "(", "self", ".", "temp_path", "+", "'/stream-blob/'", "+", "self", ".", "job_id", "+", "'/'", "+", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "full_path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "full_path", ")", ")", "handle", "=", "open", "(", "full_path", ",", "'wb'", ")", "self", ".", "streamed_files", "[", "path", "]", "=", "handle", "class", "Stream", "(", ")", ":", "def", "__init__", "(", "self", ",", "git", ")", ":", "self", ".", "git", "=", "git", "def", "write", "(", "self", ",", "data", ")", ":", "if", "path", "not", "in", "self", ".", "git", ".", "streamed_files", ":", "# already committed to server", "return", "if", "hasattr", "(", "data", ",", "'encode'", ")", ":", "data", "=", "data", ".", "encode", "(", "\"utf-8\"", ",", "'replace'", ")", "try", ":", "self", ".", "git", ".", "stream_files_lock", ".", "acquire", "(", ")", "if", "not", "handle", ".", "closed", ":", "handle", ".", "write", "(", "data", ")", "handle", ".", "flush", "(", ")", "except", "IOError", "as", "e", ":", "handle", ".", "close", "(", ")", "if", "'No space left'", "in", "e", ".", "__str__", "(", ")", ":", "sys", ".", "stderr", ".", "write", "(", "traceback", ".", "format_exc", "(", ")", "+", "'\\n'", ")", "self", ".", "git", ".", "logger", ".", "error", "(", "e", ".", "__str__", "(", ")", ")", "finally", ":", "self", ".", "git", ".", "stream_files_lock", ".", "release", "(", ")", "if", "self", ".", "git", ".", "client", ".", "online", "is", "not", "False", ":", "self", ".", "git", ".", "client", ".", "send", "(", "{", "'type'", ":", "'stream-blob'", ",", "'path'", ":", "path", ",", "'data'", ":", "data", "}", ",", "channel", "=", "''", "if", "fast_lane", "else", "'files'", ")", "return", "Stream", "(", "self", ")" ]
Create a temp file, stream it to the server if online and append its content using the write() method. This makes sure that we have all newest data of this file on the server directly. At the end of the job, the content the server received is stored as git blob on the server. It is then committed locally and pushed. Git detects that the server already has the version (through the continuous streaming) and won't push it again. Very handy for rather large files that will append over time (like channel data, logs) Example: self.log_stream = git.stream_file('log.txt') self.log_stream.write("new line\n"); self.log_stream.write("another line\n");
[ "Create", "a", "temp", "file", "stream", "it", "to", "the", "server", "if", "online", "and", "append", "its", "content", "using", "the", "write", "()", "method", ".", "This", "makes", "sure", "that", "we", "have", "all", "newest", "data", "of", "this", "file", "on", "the", "server", "directly", ".", "At", "the", "end", "of", "the", "job", "the", "content", "the", "server", "received", "is", "stored", "as", "git", "blob", "on", "the", "server", ".", "It", "is", "then", "committed", "locally", "and", "pushed", ".", "Git", "detects", "that", "the", "server", "already", "has", "the", "version", "(", "through", "the", "continuous", "streaming", ")", "and", "won", "t", "push", "it", "again", ".", "Very", "handy", "for", "rather", "large", "files", "that", "will", "append", "over", "time", "(", "like", "channel", "data", "logs", ")", "Example", ":", "self", ".", "log_stream", "=", "git", ".", "stream_file", "(", "log", ".", "txt", ")", "self", ".", "log_stream", ".", "write", "(", "new", "line", "\\", "n", ")", ";", "self", ".", "log_stream", ".", "write", "(", "another", "line", "\\", "n", ")", ";" ]
python
train
40.557143
UDST/urbansim
urbansim/models/regression.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L546-L559
def add_model(self, model): """ Add a `RegressionModel` instance. Parameters ---------- model : `RegressionModel` Should have a ``.name`` attribute matching one of the groupby segments. """ logger.debug( 'adding model {} to group {}'.format(model.name, self.name)) self.models[model.name] = model
[ "def", "add_model", "(", "self", ",", "model", ")", ":", "logger", ".", "debug", "(", "'adding model {} to group {}'", ".", "format", "(", "model", ".", "name", ",", "self", ".", "name", ")", ")", "self", ".", "models", "[", "model", ".", "name", "]", "=", "model" ]
Add a `RegressionModel` instance. Parameters ---------- model : `RegressionModel` Should have a ``.name`` attribute matching one of the groupby segments.
[ "Add", "a", "RegressionModel", "instance", "." ]
python
train
27.5
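A hedged sketch of `add_model` in use. The segmentation column, filters, model expression and segment name are invented for illustration; only the `add_model()` call itself comes from the record above.

```python
# Sketch -- column and expression names are hypothetical.
from urbansim.models.regression import (RegressionModel,
                                        RegressionModelGroup)

group = RegressionModelGroup('building_type')       # hypothetical segment col
model = RegressionModel(fit_filters=[],
                        predict_filters=[],
                        model_expression='np.log1p(rent) ~ sqft',
                        name='office')              # must match a segment
group.add_model(model)                              # keyed by model.name
```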
intuition-io/intuition
intuition/api/portfolio.py
https://github.com/intuition-io/intuition/blob/cd517e6b3b315a743eb4d0d0dc294e264ab913ce/intuition/api/portfolio.py#L77-L84
def update(self, portfolio, date, perfs=None):
        ''' Actualizes the portfolio universe with the algo state '''
        # Make the manager aware of current simulation
        self.portfolio = portfolio
        self.perfs = perfs
        self.date = date
[ "def", "update", "(", "self", ",", "portfolio", ",", "date", ",", "perfs", "=", "None", ")", ":", "# Make the manager aware of current simulation", "self", ".", "portfolio", "=", "portfolio", "self", ".", "perfs", "=", "perfs", "self", ".", "date", "=", "date" ]
Actualizes the portfolio universe with the algo state
[ "Actualizes", "the", "portfolio", "universe", "with", "the", "algo", "state" ]
python
train
33.375
robotools/fontMath
Lib/fontMath/mathGlyph.py
https://github.com/robotools/fontMath/blob/6abcb9d5a1ca19788fbde4418d7b5630c60990d8/Lib/fontMath/mathGlyph.py#L276-L286
def drawPoints(self, pointPen, filterRedundantPoints=False): """draw self using pointPen""" if filterRedundantPoints: pointPen = FilterRedundantPointPen(pointPen) for contour in self.contours: pointPen.beginPath(identifier=contour["identifier"]) for segmentType, pt, smooth, name, identifier in contour["points"]: pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier) pointPen.endPath() for component in self.components: pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
[ "def", "drawPoints", "(", "self", ",", "pointPen", ",", "filterRedundantPoints", "=", "False", ")", ":", "if", "filterRedundantPoints", ":", "pointPen", "=", "FilterRedundantPointPen", "(", "pointPen", ")", "for", "contour", "in", "self", ".", "contours", ":", "pointPen", ".", "beginPath", "(", "identifier", "=", "contour", "[", "\"identifier\"", "]", ")", "for", "segmentType", ",", "pt", ",", "smooth", ",", "name", ",", "identifier", "in", "contour", "[", "\"points\"", "]", ":", "pointPen", ".", "addPoint", "(", "pt", "=", "pt", ",", "segmentType", "=", "segmentType", ",", "smooth", "=", "smooth", ",", "name", "=", "name", ",", "identifier", "=", "identifier", ")", "pointPen", ".", "endPath", "(", ")", "for", "component", "in", "self", ".", "components", ":", "pointPen", ".", "addComponent", "(", "component", "[", "\"baseGlyph\"", "]", ",", "component", "[", "\"transformation\"", "]", ",", "identifier", "=", "component", "[", "\"identifier\"", "]", ")" ]
draw self using pointPen
[ "draw", "self", "using", "pointPen" ]
python
train
61.272727
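A hedged sketch pairing `drawPoints` with fontTools' `RecordingPointPen`. Constructing a populated `MathGlyph` is elided here, so the draw call itself is shown commented.

```python
# Sketch -- `glyph` is an assumed fontMath MathGlyph with contours/components.
from fontTools.pens.recordingPen import RecordingPointPen

pen = RecordingPointPen()
# glyph.drawPoints(pen, filterRedundantPoints=True)
# pen.value then holds (method-name, args, kwargs) tuples such as
# ('beginPath', (), {'identifier': ...}) and ('addPoint', ...).
```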
ethereum/py-evm
eth/chains/base.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L545-L551
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock: """ Returns the requested block as specified by block hash. """ validate_word(block_hash, title="Block Hash") block_header = self.get_block_header_by_hash(block_hash) return self.get_block_by_header(block_header)
[ "def", "get_block_by_hash", "(", "self", ",", "block_hash", ":", "Hash32", ")", "->", "BaseBlock", ":", "validate_word", "(", "block_hash", ",", "title", "=", "\"Block Hash\"", ")", "block_header", "=", "self", ".", "get_block_header_by_hash", "(", "block_hash", ")", "return", "self", ".", "get_block_by_header", "(", "block_header", ")" ]
Returns the requested block as specified by block hash.
[ "Returns", "the", "requested", "block", "as", "specified", "by", "block", "hash", "." ]
python
train
45.142857
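A hedged sketch of the lookup path above; `chain` is assumed to be an already-constructed py-evm chain, and the hash is just an example 32-byte value.

```python
# Sketch only -- `chain` is an assumed, already-initialized py-evm chain.
block_hash = bytes.fromhex('00' * 31 + 'ff')   # example 32-byte value

header = chain.get_block_header_by_hash(block_hash)  # raises if hash unknown
block = chain.get_block_by_hash(block_hash)          # header + body lookup
assert block.header == header
```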
SoCo/SoCo
dev_tools/analyse_ws.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L268-L310
def __update_window(self, width, height, message_no, page_no): """ Update the window with the menu and the new text """ file_exists_label = '-F-ILE' if not os.path.exists(self.__create_file_name(message_no)): file_exists_label = '(f)ile' # Clear the screen if PLATFORM == 'win32': # Ugly hack until someone figures out a better way for Windows # probably something with a cls command, but I cannot test it for _ in range(50): print else: sys.stdout.write('\x1b[2J\x1b[H') # Clear screen # Content content = self.messages[message_no].output.rstrip('\n') out = content if self.args.color: out = pygments.highlight(content, XmlLexer(), TerminalFormatter()) # Paging functionality if message_no not in self.pages: self._form_pages(message_no, content, out, height, width) # Coerce in range page_no = max(min(len(self.pages[message_no]) - 1, page_no), 0) page_content = self.pages[message_no][page_no] # Menu max_message = str(len(self.messages) - 1) position_string = u'{{0: >{0}}}/{{1: <{0}}}'.format(len(max_message)) position_string = position_string.format(message_no, max_message) # Assume less than 100 pages current_max_page = len(self.pages[message_no]) - 1 pages_string = u'{0: >2}/{1: <2}'.format(page_no, current_max_page) menu = (u'(b)rowser | {0} | Message {1} \u2193 (s)\u2191 (w) | ' u'Page {2} \u2190 (a)\u2192 (d) | (q)uit\n{3}').\ format(file_exists_label, position_string, pages_string, '-' * width) print menu print page_content return page_no
[ "def", "__update_window", "(", "self", ",", "width", ",", "height", ",", "message_no", ",", "page_no", ")", ":", "file_exists_label", "=", "'-F-ILE'", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "__create_file_name", "(", "message_no", ")", ")", ":", "file_exists_label", "=", "'(f)ile'", "# Clear the screen", "if", "PLATFORM", "==", "'win32'", ":", "# Ugly hack until someone figures out a better way for Windows", "# probably something with a cls command, but I cannot test it", "for", "_", "in", "range", "(", "50", ")", ":", "print", "else", ":", "sys", ".", "stdout", ".", "write", "(", "'\\x1b[2J\\x1b[H'", ")", "# Clear screen", "# Content", "content", "=", "self", ".", "messages", "[", "message_no", "]", ".", "output", ".", "rstrip", "(", "'\\n'", ")", "out", "=", "content", "if", "self", ".", "args", ".", "color", ":", "out", "=", "pygments", ".", "highlight", "(", "content", ",", "XmlLexer", "(", ")", ",", "TerminalFormatter", "(", ")", ")", "# Paging functionality", "if", "message_no", "not", "in", "self", ".", "pages", ":", "self", ".", "_form_pages", "(", "message_no", ",", "content", ",", "out", ",", "height", ",", "width", ")", "# Coerce in range", "page_no", "=", "max", "(", "min", "(", "len", "(", "self", ".", "pages", "[", "message_no", "]", ")", "-", "1", ",", "page_no", ")", ",", "0", ")", "page_content", "=", "self", ".", "pages", "[", "message_no", "]", "[", "page_no", "]", "# Menu", "max_message", "=", "str", "(", "len", "(", "self", ".", "messages", ")", "-", "1", ")", "position_string", "=", "u'{{0: >{0}}}/{{1: <{0}}}'", ".", "format", "(", "len", "(", "max_message", ")", ")", "position_string", "=", "position_string", ".", "format", "(", "message_no", ",", "max_message", ")", "# Assume less than 100 pages", "current_max_page", "=", "len", "(", "self", ".", "pages", "[", "message_no", "]", ")", "-", "1", "pages_string", "=", "u'{0: >2}/{1: <2}'", ".", "format", "(", "page_no", ",", "current_max_page", ")", "menu", "=", "(", "u'(b)rowser | {0} | Message {1} \\u2193 (s)\\u2191 (w) | '", "u'Page {2} \\u2190 (a)\\u2192 (d) | (q)uit\\n{3}'", ")", ".", "format", "(", "file_exists_label", ",", "position_string", ",", "pages_string", ",", "'-'", "*", "width", ")", "print", "menu", "print", "page_content", "return", "page_no" ]
Update the window with the menu and the new text
[ "Update", "the", "window", "with", "the", "menu", "and", "the", "new", "text" ]
python
train
41.348837
tensorflow/datasets
tensorflow_datasets/translate/wmt.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/translate/wmt.py#L838-L858
def _parse_tmx(path): """Generates examples from TMX file.""" def _get_tuv_lang(tuv): for k, v in tuv.items(): if k.endswith("}lang"): return v raise AssertionError("Language not found in `tuv` attributes.") def _get_tuv_seg(tuv): segs = tuv.findall("seg") assert len(segs) == 1, "Invalid number of segments: %d" % len(segs) return segs[0].text with tf.io.gfile.GFile(path) as f: for _, elem in ElementTree.iterparse(f): if elem.tag == "tu": yield { _get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv") } elem.clear()
[ "def", "_parse_tmx", "(", "path", ")", ":", "def", "_get_tuv_lang", "(", "tuv", ")", ":", "for", "k", ",", "v", "in", "tuv", ".", "items", "(", ")", ":", "if", "k", ".", "endswith", "(", "\"}lang\"", ")", ":", "return", "v", "raise", "AssertionError", "(", "\"Language not found in `tuv` attributes.\"", ")", "def", "_get_tuv_seg", "(", "tuv", ")", ":", "segs", "=", "tuv", ".", "findall", "(", "\"seg\"", ")", "assert", "len", "(", "segs", ")", "==", "1", ",", "\"Invalid number of segments: %d\"", "%", "len", "(", "segs", ")", "return", "segs", "[", "0", "]", ".", "text", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "path", ")", "as", "f", ":", "for", "_", ",", "elem", "in", "ElementTree", ".", "iterparse", "(", "f", ")", ":", "if", "elem", ".", "tag", "==", "\"tu\"", ":", "yield", "{", "_get_tuv_lang", "(", "tuv", ")", ":", "_get_tuv_seg", "(", "tuv", ")", "for", "tuv", "in", "elem", ".", "iterfind", "(", "\"tuv\"", ")", "}", "elem", ".", "clear", "(", ")" ]
Generates examples from TMX file.
[ "Generates", "examples", "from", "TMX", "file", "." ]
python
train
29.52381
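A self-contained sketch of what `_parse_tmx` yields, run on an in-memory TMX snippet so that `tf.io.gfile` is not needed; the `xml:lang` lookup below is the expanded form of the `}lang` suffix check in the record above.

```python
import io
from xml.etree import ElementTree

TMX = b"""<tmx version="1.4"><body>
<tu>
  <tuv xml:lang="en"><seg>Hello</seg></tuv>
  <tuv xml:lang="de"><seg>Hallo</seg></tuv>
</tu>
</body></tmx>"""

for _, elem in ElementTree.iterparse(io.BytesIO(TMX)):
    if elem.tag == "tu":
        print({tuv.get("{http://www.w3.org/XML/1998/namespace}lang"):
               tuv.find("seg").text
               for tuv in elem.iterfind("tuv")})
        elem.clear()   # free the parsed element, as in the original
# -> {'en': 'Hello', 'de': 'Hallo'}
```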
gregreen/dustmaps
dustmaps/iphas.py
https://github.com/gregreen/dustmaps/blob/c8f571a71da0d951bf8ea865621bee14492bdfd9/dustmaps/iphas.py#L76-L212
def query(self, coords, mode='random_sample'):
        """
        Returns A0 at the given coordinates. There are several different query
        modes, which handle the probabilistic nature of the map differently.

        Args:
            coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to
                query.
            mode (Optional[:obj:`str`]): Five different query modes are
                available: ``'random_sample'``, ``'random_sample_per_pix'``,
                ``'samples'``, ``'median'`` and ``'mean'``. The ``mode``
                determines how the output will reflect the probabilistic
                nature of the IPHAS dust map.

        Returns:
            Monochromatic extinction, A0, at the specified coordinates, in
            mags. The shape of the output depends on the ``mode``, and on
            whether ``coords`` contains distances.

            If ``coords`` does not specify distance(s), then the shape of the
            output begins with `coords.shape`. If `coords` does specify
            distance(s), then the shape of the output begins with
            ``coords.shape + ([number of distance bins],)``.

            If ``mode`` is ``'random_sample'``, then at each
            coordinate/distance, a random sample of reddening is given.

            If ``mode`` is ``'random_sample_per_pix'``, then the sample chosen
            for each angular pixel of the map will be consistent. For example,
            if two query coordinates lie in the same map pixel, then the same
            random sample will be chosen from the map for both query
            coordinates.

            If ``mode`` is ``'median'``, then at each coordinate/distance, the
            median reddening is returned.

            If ``mode`` is ``'mean'``, then at each coordinate/distance, the
            mean reddening is returned.

            Finally, if ``mode`` is ``'samples'``, then at each
            coordinate/distance, all samples are returned.
        """

        # Check that the query mode is supported
        valid_modes = [
            'random_sample',
            'random_sample_per_pix',
            'samples',
            'median',
            'mean']

        if mode not in valid_modes:
            raise ValueError(
                '"{}" is not a valid `mode`. Valid modes are:\n'
                ' {}'.format(mode, valid_modes))

        n_coords_ret = coords.shape[0]

        # Determine if distance has been requested
        has_dist = hasattr(coords.distance, 'kpc')
        d = coords.distance.kpc if has_dist else None

        # Convert coordinates to pixel indices
        pix_idx = self._coords2idx(coords)

        # Determine which coordinates are out of bounds
        mask_idx = (pix_idx == self._n_pix)
        if np.any(mask_idx):
            pix_idx[mask_idx] = 0

        # Which samples to extract
        if mode == 'random_sample':
            samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
            n_samp_ret = 1
        elif mode == 'random_sample_per_pix':
            samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
            n_samp_ret = 1
        else:
            samp_idx = slice(None)
            n_samp_ret = self._n_samples

        # Which distances to extract
        if has_dist:
            d = coords.distance.pc
            dist_idx_ceil = np.searchsorted(self._dists, d)

            if isinstance(samp_idx, slice):
                ret = np.empty((n_coords_ret, n_samp_ret), dtype='f4')
            else:
                ret = np.empty((n_coords_ret,), dtype='f4')

            # d < d(nearest distance slice)
            idx_near = (dist_idx_ceil == 0)
            if np.any(idx_near):
                a = d[idx_near] / self._dists[0]
                if isinstance(samp_idx, slice):
                    ret[idx_near] = a[:,None] * self._data['A0'][pix_idx[idx_near], 0, samp_idx]
                else:
                    ret[idx_near] = a[:] * self._data['A0'][pix_idx[idx_near], 0, samp_idx[idx_near]]

            # d > d(farthest distance slice)
            idx_far = (dist_idx_ceil == self._n_dists)
            if np.any(idx_far):
                if isinstance(samp_idx, slice):
                    ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx]
                else:
                    ret[idx_far] = self._data['A0'][pix_idx[idx_far], -1, samp_idx[idx_far]]

            # d(nearest distance slice) < d < d(farthest distance slice)
            idx_btw = ~idx_near & ~idx_far
            if np.any(idx_btw):
                d_ceil = self._dists[dist_idx_ceil[idx_btw]]
                d_floor = self._dists[dist_idx_ceil[idx_btw]-1]
                a = (d_ceil - d[idx_btw]) / (d_ceil - d_floor)
                if isinstance(samp_idx, slice):
                    ret[idx_btw] = (
                        (1.-a[:,None]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx]
                        + a[:,None] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1, samp_idx])
                else:
                    ret[idx_btw] = (
                        (1.-a[:]) * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw], samp_idx[idx_btw]]
                        + a[:] * self._data['A0'][pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1, samp_idx[idx_btw]])
        else:
            # TODO: Harmonize order of distances & samples with Bayestar.
            ret = self._data['A0'][pix_idx, :, samp_idx]

        # Reduce the samples in the requested manner
        samp_axis = 1 if has_dist else 2

        if mode == 'median':
            ret = np.median(ret, axis=samp_axis)
        elif mode == 'mean':
            ret = np.mean(ret, axis=samp_axis)

        if np.any(mask_idx):
            ret[mask_idx] = np.nan

        return ret
[ "def", "query", "(", "self", ",", "coords", ",", "mode", "=", "'random_sample'", ")", ":", "# Check that the query mode is supported", "valid_modes", "=", "[", "'random_sample'", ",", "'random_sample_per_pix'", ",", "'samples'", ",", "'median'", ",", "'mean'", "]", "if", "mode", "not", "in", "valid_modes", ":", "raise", "ValueError", "(", "'\"{}\" is not a valid `mode`. Valid modes are:\\n'", "' {}'", ".", "format", "(", "mode", ",", "valid_modes", ")", ")", "n_coords_ret", "=", "coords", ".", "shape", "[", "0", "]", "# Determine if distance has been requested", "has_dist", "=", "hasattr", "(", "coords", ".", "distance", ",", "'kpc'", ")", "d", "=", "coords", ".", "distance", ".", "kpc", "if", "has_dist", "else", "None", "# Convert coordinates to pixel indices", "pix_idx", "=", "self", ".", "_coords2idx", "(", "coords", ")", "# Determine which coordinates are out of bounds", "mask_idx", "=", "(", "pix_idx", "==", "self", ".", "_n_pix", ")", "if", "np", ".", "any", "(", "mask_idx", ")", ":", "pix_idx", "[", "mask_idx", "]", "=", "0", "# Which samples to extract", "if", "mode", "==", "'random_sample'", ":", "samp_idx", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "self", ".", "_n_samples", ",", "pix_idx", ".", "size", ")", "n_samp_ret", "=", "1", "elif", "mode", "==", "'random_sample_per_pix'", ":", "samp_idx", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "self", ".", "_n_samples", ",", "self", ".", "_n_pix", ")", "[", "pix_idx", "]", "n_sample_ret", "=", "1", "else", ":", "samp_idx", "=", "slice", "(", "None", ")", "n_samp_ret", "=", "self", ".", "_n_samples", "# Which distances to extract", "if", "has_dist", ":", "d", "=", "coords", ".", "distance", ".", "pc", "dist_idx_ceil", "=", "np", ".", "searchsorted", "(", "self", ".", "_dists", ",", "d", ")", "if", "isinstance", "(", "samp_idx", ",", "slice", ")", ":", "ret", "=", "np", ".", "empty", "(", "(", "n_coords_ret", ",", "n_samp_ret", ")", ",", "dtype", "=", "'f4'", ")", "else", ":", "ret", "=", "np", ".", "empty", "(", "(", "n_coords_ret", ",", ")", ",", "dtype", "=", "'f4'", ")", "# d < d(nearest distance slice)", "idx_near", "=", "(", "dist_idx_ceil", "==", "0", ")", "if", "np", ".", "any", "(", "idx_near", ")", ":", "a", "=", "d", "[", "idx_near", "]", "/", "self", ".", "_dists", "[", "0", "]", "if", "isinstance", "(", "samp_idx", ",", "slice", ")", ":", "ret", "[", "idx_near", "]", "=", "a", "[", ":", ",", "None", "]", "*", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_near", "]", ",", "0", ",", "samp_idx", "]", "else", ":", "ret", "[", "idx_near", "]", "=", "a", "[", ":", "]", "*", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_near", "]", ",", "0", ",", "samp_idx", "[", "idx_near", "]", "]", "# d > d(farthest distance slice)", "idx_far", "=", "(", "dist_idx_ceil", "==", "self", ".", "_n_dists", ")", "if", "np", ".", "any", "(", "idx_far", ")", ":", "if", "isinstance", "(", "samp_idx", ",", "slice", ")", ":", "ret", "[", "idx_far", "]", "=", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_far", "]", ",", "-", "1", ",", "samp_idx", "]", "else", ":", "ret", "[", "idx_far", "]", "=", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_far", "]", ",", "-", "1", ",", "samp_idx", "[", "idx_far", "]", "]", "# d(nearest distance slice) < d < d(farthest distance slice)", "idx_btw", "=", "~", "idx_near", "&", "~", "idx_far", "if", "np", ".", "any", "(", "idx_btw", ")", ":", "d_ceil", "=", "self", ".", "_dists", "[", "dist_idx_ceil", "[", 
"idx_btw", "]", "]", "d_floor", "=", "self", ".", "_dists", "[", "dist_idx_ceil", "[", "idx_btw", "]", "-", "1", "]", "a", "=", "(", "d_ceil", "-", "d", "[", "idx_btw", "]", ")", "/", "(", "d_ceil", "-", "d_floor", ")", "if", "isinstance", "(", "samp_idx", ",", "slice", ")", ":", "ret", "[", "idx_btw", "]", "=", "(", "(", "1.", "-", "a", "[", ":", ",", "None", "]", ")", "*", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_btw", "]", ",", "dist_idx_ceil", "[", "idx_btw", "]", ",", "samp_idx", "]", "+", "a", "[", ":", ",", "None", "]", "*", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_btw", "]", ",", "dist_idx_ceil", "[", "idx_btw", "]", "-", "1", ",", "samp_idx", "]", ")", "else", ":", "ret", "[", "idx_btw", "]", "=", "(", "(", "1.", "-", "a", "[", ":", "]", ")", "*", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_btw", "]", ",", "dist_idx_ceil", "[", "idx_btw", "]", ",", "samp_idx", "[", "idx_btw", "]", "]", "+", "a", "[", ":", "]", "*", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", "[", "idx_btw", "]", ",", "dist_idx_ceil", "[", "idx_btw", "]", "-", "1", ",", "samp_idx", "[", "idx_btw", "]", "]", ")", "else", ":", "# TODO: Harmonize order of distances & samples with Bayestar.", "ret", "=", "self", ".", "_data", "[", "'A0'", "]", "[", "pix_idx", ",", ":", ",", "samp_idx", "]", "# Reduce the samples in the requested manner", "samp_axis", "=", "1", "if", "has_dist", "else", "2", "if", "mode", "==", "'median'", ":", "ret", "=", "np", ".", "median", "(", "ret", ",", "axis", "=", "samp_axis", ")", "elif", "mode", "==", "'mean'", ":", "ret", "=", "np", ".", "mean", "(", "ret", ",", "axis", "=", "samp_axis", ")", "if", "np", ".", "any", "(", "mask_idx", ")", ":", "ret", "[", "mask_idx", "]", "=", "np", ".", "nan", "return", "ret" ]
Returns A0 at the given coordinates. There are several different query modes, which handle the probabilistic nature of the map differently.

Args: coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query. mode (Optional[:obj:`str`]): Five different query modes are available: ``'random_sample'``, ``'random_sample_per_pix'``, ``'samples'``, ``'median'`` and ``'mean'``. The ``mode`` determines how the output will reflect the probabilistic nature of the IPHAS dust map.

Returns: Monochromatic extinction, A0, at the specified coordinates, in mags. The shape of the output depends on the ``mode``, and on whether ``coords`` contains distances.

If ``coords`` does not specify distance(s), then the shape of the output begins with `coords.shape`. If `coords` does specify distance(s), then the shape of the output begins with ``coords.shape + ([number of distance bins],)``.

If ``mode`` is ``'random_sample'``, then at each coordinate/distance, a random sample of reddening is given.

If ``mode`` is ``'random_sample_per_pix'``, then the sample chosen for each angular pixel of the map will be consistent. For example, if two query coordinates lie in the same map pixel, then the same random sample will be chosen from the map for both query coordinates.

If ``mode`` is ``'median'``, then at each coordinate/distance, the median reddening is returned.

If ``mode`` is ``'mean'``, then at each coordinate/distance, the mean reddening is returned.

Finally, if ``mode`` is ``'samples'``, then at each coordinate/distance, all samples are returned.
[ "Returns", "A0", "at", "the", "given", "coordinates", ".", "There", "are", "several", "different", "query", "modes", "which", "handle", "the", "probabilistic", "nature", "of", "the", "map", "differently", "." ]
python
train
41.518248
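A hedged usage sketch for the `query` method above. `IPHASQuery` comes from the dustmaps package this record belongs to and assumes the IPHAS data has already been fetched locally; the coordinates and distances are arbitrary example values.

```python
# Sketch -- assumes `dustmaps` is installed and the IPHAS data is downloaded.
from astropy.coordinates import SkyCoord
import astropy.units as u
from dustmaps.iphas import IPHASQuery

q = IPHASQuery()
coords = SkyCoord(l=[90.0, 120.0] * u.deg,
                  b=[0.5, -0.5] * u.deg,
                  distance=[1.0, 2.5] * u.kpc,
                  frame='galactic')

A0_median = q.query(coords, mode='median')    # one value per coordinate
A0_samples = q.query(coords, mode='samples')  # extra axis for the samples
```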
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L161-L164
def running_objects(self): """Return the objects associated with this workflow.""" return [obj for obj in self.database_objects if obj.status in [obj.known_statuses.RUNNING]]
[ "def", "running_objects", "(", "self", ")", ":", "return", "[", "obj", "for", "obj", "in", "self", ".", "database_objects", "if", "obj", ".", "status", "in", "[", "obj", ".", "known_statuses", ".", "RUNNING", "]", "]" ]
Return the objects associated with this workflow.
[ "Return", "the", "objects", "associated", "with", "this", "workflow", "." ]
python
train
50.75
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_sensors.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_sensors.py#L82-L93
def report(self, name, ok, msg=None, deltat=20): '''report a sensor error''' r = self.reports[name] if time.time() < r.last_report + deltat: r.ok = ok return r.last_report = time.time() if ok and not r.ok: self.say("%s OK" % name) r.ok = ok if not r.ok: self.say(msg)
[ "def", "report", "(", "self", ",", "name", ",", "ok", ",", "msg", "=", "None", ",", "deltat", "=", "20", ")", ":", "r", "=", "self", ".", "reports", "[", "name", "]", "if", "time", ".", "time", "(", ")", "<", "r", ".", "last_report", "+", "deltat", ":", "r", ".", "ok", "=", "ok", "return", "r", ".", "last_report", "=", "time", ".", "time", "(", ")", "if", "ok", "and", "not", "r", ".", "ok", ":", "self", ".", "say", "(", "\"%s OK\"", "%", "name", ")", "r", ".", "ok", "=", "ok", "if", "not", "r", ".", "ok", ":", "self", ".", "say", "(", "msg", ")" ]
report a sensor error
[ "report", "a", "sensor", "error" ]
python
train
30
juju/charm-helpers
charmhelpers/core/hookenv.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/hookenv.py#L394-L425
def config(scope=None): """ Get the juju charm configuration (scope==None) or individual key, (scope=str). The returned value is a Python data structure loaded as JSON from the Juju config command. :param scope: If set, return the value for the specified key. :type scope: Optional[str] :returns: Either the whole config as a Config, or a key from it. :rtype: Any """ global _cache_config config_cmd_line = ['config-get', '--all', '--format=json'] try: # JSON Decode Exception for Python3.5+ exc_json = json.decoder.JSONDecodeError except AttributeError: # JSON Decode Exception for Python2.7 through Python3.4 exc_json = ValueError try: if _cache_config is None: config_data = json.loads( subprocess.check_output(config_cmd_line).decode('UTF-8')) _cache_config = Config(config_data) if scope is not None: return _cache_config.get(scope) return _cache_config except (exc_json, UnicodeDecodeError) as e: log('Unable to parse output from config-get: config_cmd_line="{}" ' 'message="{}"' .format(config_cmd_line, str(e)), level=ERROR) return None
[ "def", "config", "(", "scope", "=", "None", ")", ":", "global", "_cache_config", "config_cmd_line", "=", "[", "'config-get'", ",", "'--all'", ",", "'--format=json'", "]", "try", ":", "# JSON Decode Exception for Python3.5+", "exc_json", "=", "json", ".", "decoder", ".", "JSONDecodeError", "except", "AttributeError", ":", "# JSON Decode Exception for Python2.7 through Python3.4", "exc_json", "=", "ValueError", "try", ":", "if", "_cache_config", "is", "None", ":", "config_data", "=", "json", ".", "loads", "(", "subprocess", ".", "check_output", "(", "config_cmd_line", ")", ".", "decode", "(", "'UTF-8'", ")", ")", "_cache_config", "=", "Config", "(", "config_data", ")", "if", "scope", "is", "not", "None", ":", "return", "_cache_config", ".", "get", "(", "scope", ")", "return", "_cache_config", "except", "(", "exc_json", ",", "UnicodeDecodeError", ")", "as", "e", ":", "log", "(", "'Unable to parse output from config-get: config_cmd_line=\"{}\" '", "'message=\"{}\"'", ".", "format", "(", "config_cmd_line", ",", "str", "(", "e", ")", ")", ",", "level", "=", "ERROR", ")", "return", "None" ]
Get the juju charm configuration (scope==None) or individual key, (scope=str). The returned value is a Python data structure loaded as JSON from the Juju config command. :param scope: If set, return the value for the specified key. :type scope: Optional[str] :returns: Either the whole config as a Config, or a key from it. :rtype: Any
[ "Get", "the", "juju", "charm", "configuration", "(", "scope", "==", "None", ")", "or", "individual", "key", "(", "scope", "=", "str", ")", ".", "The", "returned", "value", "is", "a", "Python", "data", "structure", "loaded", "as", "JSON", "from", "the", "Juju", "config", "command", "." ]
python
train
38.21875
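A usage sketch for the `config()` helper above. It is only meaningful inside a Juju hook environment, where the `config-get` tool is on PATH; the key names below are placeholders.

```python
# Sketch -- run inside a Juju hook; key names are hypothetical.
from charmhelpers.core.hookenv import config

cfg = config()                     # whole config as a Config mapping, or None
if cfg is not None:
    port = cfg.get('port', 8080)   # hypothetical key with a fallback

debug = config('debug')            # single key; None if config-get failed
```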
pypa/pipenv
pipenv/patched/notpip/_internal/req/req_uninstall.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/req/req_uninstall.py#L337-L366
def remove(self, auto_confirm=False, verbose=False): """Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).""" if not self.paths: logger.info( "Can't uninstall '%s'. No files were found to uninstall.", self.dist.project_name, ) return dist_name_version = ( self.dist.project_name + "-" + self.dist.version ) logger.info('Uninstalling %s:', dist_name_version) with indent_log(): if auto_confirm or self._allowed_to_proceed(verbose): moved = self._moved_paths for_rename = compress_for_rename(self.paths) for path in sorted(compact(for_rename)): moved.stash(path) logger.debug('Removing file or directory %s', path) for pth in self.pth.values(): pth.remove() logger.info('Successfully uninstalled %s', dist_name_version)
[ "def", "remove", "(", "self", ",", "auto_confirm", "=", "False", ",", "verbose", "=", "False", ")", ":", "if", "not", "self", ".", "paths", ":", "logger", ".", "info", "(", "\"Can't uninstall '%s'. No files were found to uninstall.\"", ",", "self", ".", "dist", ".", "project_name", ",", ")", "return", "dist_name_version", "=", "(", "self", ".", "dist", ".", "project_name", "+", "\"-\"", "+", "self", ".", "dist", ".", "version", ")", "logger", ".", "info", "(", "'Uninstalling %s:'", ",", "dist_name_version", ")", "with", "indent_log", "(", ")", ":", "if", "auto_confirm", "or", "self", ".", "_allowed_to_proceed", "(", "verbose", ")", ":", "moved", "=", "self", ".", "_moved_paths", "for_rename", "=", "compress_for_rename", "(", "self", ".", "paths", ")", "for", "path", "in", "sorted", "(", "compact", "(", "for_rename", ")", ")", ":", "moved", ".", "stash", "(", "path", ")", "logger", ".", "debug", "(", "'Removing file or directory %s'", ",", "path", ")", "for", "pth", "in", "self", ".", "pth", ".", "values", "(", ")", ":", "pth", ".", "remove", "(", ")", "logger", ".", "info", "(", "'Successfully uninstalled %s'", ",", "dist_name_version", ")" ]
Remove paths in ``self.paths`` with confirmation (unless ``auto_confirm`` is True).
[ "Remove", "paths", "in", "self", ".", "paths", "with", "confirmation", "(", "unless", "auto_confirm", "is", "True", ")", "." ]
python
train
33.866667
nteract/papermill
papermill/preprocess.py
https://github.com/nteract/papermill/blob/7423a303f3fa22ec6d03edf5fd9700d659b5a6fa/papermill/preprocess.py#L14-L26
def preprocess(self, nb_man, resources, km=None): """ Wraps the parent class process call slightly """ with self.setup_preprocessor(nb_man.nb, resources, km=km): if self.log_output: self.log.info("Executing notebook with kernel: {}".format(self.kernel_name)) nb, resources = self.papermill_process(nb_man, resources) info_msg = self._wait_for_reply(self.kc.kernel_info()) nb.metadata['language_info'] = info_msg['content']['language_info'] self.set_widgets_metadata() return nb, resources
[ "def", "preprocess", "(", "self", ",", "nb_man", ",", "resources", ",", "km", "=", "None", ")", ":", "with", "self", ".", "setup_preprocessor", "(", "nb_man", ".", "nb", ",", "resources", ",", "km", "=", "km", ")", ":", "if", "self", ".", "log_output", ":", "self", ".", "log", ".", "info", "(", "\"Executing notebook with kernel: {}\"", ".", "format", "(", "self", ".", "kernel_name", ")", ")", "nb", ",", "resources", "=", "self", ".", "papermill_process", "(", "nb_man", ",", "resources", ")", "info_msg", "=", "self", ".", "_wait_for_reply", "(", "self", ".", "kc", ".", "kernel_info", "(", ")", ")", "nb", ".", "metadata", "[", "'language_info'", "]", "=", "info_msg", "[", "'content'", "]", "[", "'language_info'", "]", "self", ".", "set_widgets_metadata", "(", ")", "return", "nb", ",", "resources" ]
Wraps the parent class process call slightly
[ "Wraps", "the", "parent", "class", "process", "call", "slightly" ]
python
train
45.615385
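A hedged sketch for context: the preprocessor above is papermill-internal, and end users normally reach it through `execute_notebook()`. The paths and parameters below are placeholders.

```python
# Sketch -- assumes papermill is installed and input.ipynb exists.
import papermill as pm

pm.execute_notebook(
    'input.ipynb',               # placeholder input path
    'output.ipynb',              # placeholder output path
    parameters={'alpha': 0.5},   # injected into the notebook's parameters cell
    log_output=True,             # mirrors the log_output check above
)
```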
wilson-eft/wilson
wilson/match/smeft.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/match/smeft.py#L206-L221
def match_all(d_SMEFT, parameters=None): """Match the SMEFT Warsaw basis onto the WET JMS basis.""" p = default_parameters.copy() if parameters is not None: # if parameters are passed in, overwrite the default values p.update(parameters) C = wilson.util.smeftutil.wcxf2arrays_symmetrized(d_SMEFT) C['vT'] = 246.22 C_WET = match_all_array(C, p) C_WET = wilson.translate.wet.rotate_down(C_WET, p) C_WET = wetutil.unscale_dict_wet(C_WET) d_WET = wilson.util.smeftutil.arrays2wcxf(C_WET) basis = wcxf.Basis['WET', 'JMS'] keys = set(d_WET.keys()) & set(basis.all_wcs) d_WET = {k: d_WET[k] for k in keys} return d_WET
[ "def", "match_all", "(", "d_SMEFT", ",", "parameters", "=", "None", ")", ":", "p", "=", "default_parameters", ".", "copy", "(", ")", "if", "parameters", "is", "not", "None", ":", "# if parameters are passed in, overwrite the default values", "p", ".", "update", "(", "parameters", ")", "C", "=", "wilson", ".", "util", ".", "smeftutil", ".", "wcxf2arrays_symmetrized", "(", "d_SMEFT", ")", "C", "[", "'vT'", "]", "=", "246.22", "C_WET", "=", "match_all_array", "(", "C", ",", "p", ")", "C_WET", "=", "wilson", ".", "translate", ".", "wet", ".", "rotate_down", "(", "C_WET", ",", "p", ")", "C_WET", "=", "wetutil", ".", "unscale_dict_wet", "(", "C_WET", ")", "d_WET", "=", "wilson", ".", "util", ".", "smeftutil", ".", "arrays2wcxf", "(", "C_WET", ")", "basis", "=", "wcxf", ".", "Basis", "[", "'WET'", ",", "'JMS'", "]", "keys", "=", "set", "(", "d_WET", ".", "keys", "(", ")", ")", "&", "set", "(", "basis", ".", "all_wcs", ")", "d_WET", "=", "{", "k", ":", "d_WET", "[", "k", "]", "for", "k", "in", "keys", "}", "return", "d_WET" ]
Match the SMEFT Warsaw basis onto the WET JMS basis.
[ "Match", "the", "SMEFT", "Warsaw", "basis", "onto", "the", "WET", "JMS", "basis", "." ]
python
train
41.5
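A hedged sketch of `match_all` in use: it takes a WCxf-style dict of SMEFT Warsaw-basis coefficients at the matching scale and returns WET JMS-basis coefficients. The coefficient name, its value and the parameter key are illustrative assumptions, not values from the record.

```python
# Sketch -- the coefficient key and the 'm_b' parameter key are assumptions.
from wilson.match.smeft import match_all

d_SMEFT = {'lq1_1111': 1e-8}          # hypothetical Warsaw-basis entry
d_WET = match_all(d_SMEFT)            # default input parameters
d_WET_custom = match_all(d_SMEFT, parameters={'m_b': 4.18})
print(sorted(d_WET)[:5])              # a few WET JMS coefficient names
```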
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L863-L877
def print_tensor(td_tensor, indent="| ", max_depth=-1, depth=0):
    """ print_tensor(td_tensor, indent="| ", max_depth=-1)
    Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is
    indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth,
    where each tensor and each op count as a level.
    """
    offset = depth * indent
    line = "td tensor: %s" % td_tensor.name
    if td_tensor.value is not None:
        line += " (%s)" % (",".join(str(i) for i in td_tensor.value.shape),)
    print(offset + line)

    if td_tensor.op and (max_depth < 0 or max_depth > depth):
        print_op(td_tensor.op, indent=indent, max_depth=max_depth, depth=depth+1)
[ "def", "print_tensor", "(", "td_tensor", ",", "indent", "=", "\"| \"", ",", "max_depth", "=", "-", "1", ",", "depth", "=", "0", ")", ":", "offset", "=", "depth", "*", "indent", "line", "=", "\"td tensor: %s\"", "%", "td_tensor", ".", "name", "if", "td_tensor", ".", "value", "is", "not", "None", ":", "line", "+=", "\" (%s)\"", "%", "(", "\",\"", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "td_tensor", ".", "value", ".", "shape", ")", ",", ")", "print", "(", "offset", "+", "line", ")", "if", "td_tensor", ".", "op", "and", "(", "max_depth", "<", "0", "or", "max_depth", ">", "depth", ")", ":", "print_op", "(", "td_tensor", ".", "op", ",", "indent", "=", "indent", ",", "max_depth", "=", "max_depth", ",", "depth", "=", "depth", "+", "1", ")" ]
print_tensor(td_tensor, indent="| ", max_depth=-1)
Prints the dependency graph of a :py:class:`Tensor` *td_tensor*, where each new level is indented by *indent*. When *max_depth* is positive, the graph is truncated at that depth, where each tensor and each op count as a level.
[ "print_tensor", "(", "td_tensor", "indent", "=", "max_depth", "=", "-", "1", ")", "Prints", "the", "dependency", "graph", "of", "a", ":", "py", ":", "class", ":", "Tensor", "*", "td_tensor", "*", "where", "each", "new", "level", "is", "indented", "by", "*", "indent", "*", ".", "When", "*", "max_depth", "*", "is", "positive", "the", "graph", "is", "truncated", "at", "that", "depth", "where", "each", "tensor", "and", "each", "op", "count", "as", "a", "level", "." ]
python
train
47.733333
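A hedged usage sketch for `print_tensor`; it assumes a tfdeploy model file saved earlier, and the path and tensor names are placeholders.

```python
# Sketch -- assumes a previously saved tfdeploy model.
import tfdeploy as td

model = td.Model('model.pkl')              # placeholder path
inp, outp = model.get('input', 'output')   # placeholder tensor names

# Print the dependency graph two levels deep (tensors and ops each count).
td.print_tensor(outp, max_depth=2)
```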