Dataset schema:
    repo              string (7 to 54 chars)
    path              string (4 to 192 chars)
    url               string (87 to 284 chars)
    code              string (78 to 104k chars)
    code_tokens       list
    docstring         string (1 to 46.9k chars)
    docstring_tokens  list
    language          string (1 class)
    partition         string (3 classes)
spry-group/python-vultr
vultr/v1_snapshot.py
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_snapshot.py#L10-L19
def create(self, subid, params=None):
    ''' /v1/snapshot/create
    POST - account
    Create a snapshot from an existing virtual machine.
    The virtual machine does not need to be stopped.

    Link: https://www.vultr.com/api/#snapshot_create
    '''
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/snapshot/create', params, 'POST')
[ "def", "create", "(", "self", ",", "subid", ",", "params", "=", "None", ")", ":", "params", "=", "update_params", "(", "params", ",", "{", "'SUBID'", ":", "subid", "}", ")", "return", "self", ".", "request", "(", "'/v1/snapshot/create'", ",", "params", ",", "'POST'", ")" ]
/v1/snapshot/create POST - account Create a snapshot from an existing virtual machine. The virtual machine does not need to be stopped. Link: https://www.vultr.com/api/#snapshot_create
[ "/", "v1", "/", "snapshot", "/", "create", "POST", "-", "account", "Create", "a", "snapshot", "from", "an", "existing", "virtual", "machine", ".", "The", "virtual", "machine", "does", "not", "need", "to", "be", "stopped", "." ]
python
train
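A minimal usage sketch for the snapshot create call above. The client class and the `snapshot` attribute name are assumptions about how python-vultr wires up its sub-APIs, and the API key and SUBID are placeholders:

import os
from vultr import Vultr  # assumed import path for the python-vultr package

vultr = Vultr(os.environ['VULTR_API_KEY'])   # placeholder credential
result = vultr.snapshot.create('576965')     # hypothetical SUBID of an existing VM
print(result)                                # decoded JSON response from /v1/snapshot/create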
gbowerman/azurerm
examples/list_vmss_vms.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/list_vmss_vms.py#L13-L44
def main():
    '''main routine'''
    # process arguments
    if len(sys.argv) < 3:
        usage()

    rgname = sys.argv[1]
    vmss_name = sys.argv[2]

    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermonfig.json in current folder')

    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']

    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)

    instanceviewlist = azurerm.list_vmss_vm_instance_view(access_token, subscription_id, rgname,
                                                          vmss_name)

    for vmi in instanceviewlist['value']:
        instance_id = vmi['instanceId']
        upgrade_domain = vmi['properties']['instanceView']['platformUpdateDomain']
        fault_domain = vmi['properties']['instanceView']['platformFaultDomain']
        print('Instance ID: ' + instance_id + ', UD: ' + str(upgrade_domain) + ', FD: '
              + str(fault_domain))
[ "def", "main", "(", ")", ":", "# process arguments", "if", "len", "(", "sys", ".", "argv", ")", "<", "3", ":", "usage", "(", ")", "rgname", "=", "sys", ".", "argv", "[", "1", "]", "vmss_name", "=", "sys", ".", "argv", "[", "2", "]", "# Load Azure app defaults", "try", ":", "with", "open", "(", "'azurermconfig.json'", ")", "as", "config_file", ":", "config_data", "=", "json", ".", "load", "(", "config_file", ")", "except", "FileNotFoundError", ":", "sys", ".", "exit", "(", "'Error: Expecting azurermonfig.json in current folder'", ")", "tenant_id", "=", "config_data", "[", "'tenantId'", "]", "app_id", "=", "config_data", "[", "'appId'", "]", "app_secret", "=", "config_data", "[", "'appSecret'", "]", "subscription_id", "=", "config_data", "[", "'subscriptionId'", "]", "access_token", "=", "azurerm", ".", "get_access_token", "(", "tenant_id", ",", "app_id", ",", "app_secret", ")", "instanceviewlist", "=", "azurerm", ".", "list_vmss_vm_instance_view", "(", "access_token", ",", "subscription_id", ",", "rgname", ",", "vmss_name", ")", "for", "vmi", "in", "instanceviewlist", "[", "'value'", "]", ":", "instance_id", "=", "vmi", "[", "'instanceId'", "]", "upgrade_domain", "=", "vmi", "[", "'properties'", "]", "[", "'instanceView'", "]", "[", "'platformUpdateDomain'", "]", "fault_domain", "=", "vmi", "[", "'properties'", "]", "[", "'instanceView'", "]", "[", "'platformFaultDomain'", "]", "print", "(", "'Instance ID: '", "+", "instance_id", "+", "', UD: '", "+", "str", "(", "upgrade_domain", ")", "+", "', FD: '", "+", "str", "(", "fault_domain", ")", ")" ]
main routine
[ "main", "routine" ]
python
train
annoviko/pyclustering
pyclustering/core/som_wrapper.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/core/som_wrapper.py#L174-L188
def som_get_capture_objects(som_pointer):
    """!
    @brief Returns list of indexes of captured objects by each neuron.

    @param[in] som_pointer (c_pointer): pointer to object of self-organized map.

    """
    ccore = ccore_library.get()

    ccore.som_get_capture_objects.restype = POINTER(pyclustering_package)
    package = ccore.som_get_capture_objects(som_pointer)

    result = package_extractor(package).extract()
    return result
[ "def", "som_get_capture_objects", "(", "som_pointer", ")", ":", "ccore", "=", "ccore_library", ".", "get", "(", ")", "ccore", ".", "som_get_capture_objects", ".", "restype", "=", "POINTER", "(", "pyclustering_package", ")", "package", "=", "ccore", ".", "som_get_capture_objects", "(", "som_pointer", ")", "result", "=", "package_extractor", "(", "package", ")", ".", "extract", "(", ")", "return", "result" ]
! @brief Returns list of indexes of captured objects by each neuron. @param[in] som_pointer (c_pointer): pointer to object of self-organized map.
[ "!" ]
python
valid
CxAalto/gtfspy
gtfspy/routing/node_profile_multiobjective.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_multiobjective.py#L58-L79
def _check_dep_time_is_valid(self, dep_time):
    """
    A simple checker, that connections are coming in descending order of departure time
    and that no departure time has been "skipped".

    Parameters
    ----------
    dep_time

    Returns
    -------
    None
    """
    assert dep_time <= self._min_dep_time, "Labels should be entered in decreasing order of departure time."
    dep_time_index = self.dep_times_to_index[dep_time]
    if self._min_dep_time < float('inf'):
        min_dep_index = self.dep_times_to_index[self._min_dep_time]
        assert min_dep_index == dep_time_index or (min_dep_index == dep_time_index - 1), \
            "dep times should be ordered sequentially"
    else:
        assert dep_time_index is 0, "first dep_time index should be zero (ensuring that all connections are properly handled)"
    self._min_dep_time = dep_time
[ "def", "_check_dep_time_is_valid", "(", "self", ",", "dep_time", ")", ":", "assert", "dep_time", "<=", "self", ".", "_min_dep_time", ",", "\"Labels should be entered in decreasing order of departure time.\"", "dep_time_index", "=", "self", ".", "dep_times_to_index", "[", "dep_time", "]", "if", "self", ".", "_min_dep_time", "<", "float", "(", "'inf'", ")", ":", "min_dep_index", "=", "self", ".", "dep_times_to_index", "[", "self", ".", "_min_dep_time", "]", "assert", "min_dep_index", "==", "dep_time_index", "or", "(", "min_dep_index", "==", "dep_time_index", "-", "1", ")", ",", "\"dep times should be ordered sequentially\"", "else", ":", "assert", "dep_time_index", "is", "0", ",", "\"first dep_time index should be zero (ensuring that all connections are properly handled)\"", "self", ".", "_min_dep_time", "=", "dep_time" ]
A simple checker, that connections are coming in descending order of departure time and that no departure time has been "skipped". Parameters ---------- dep_time Returns ------- None
[ "A", "simple", "checker", "that", "connections", "are", "coming", "in", "descending", "order", "of", "departure", "time", "and", "that", "no", "departure", "time", "has", "been", "skipped", "." ]
python
valid
gwastro/pycbc-glue
pycbc_glue/segments.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/segments.py#L1154-L1164
def extend(self, other):
    """
    Appends the segmentlists from other to the corresponding
    segmentlists in self, adding new segmentslists to self as
    needed.
    """
    for key, value in other.iteritems():
        if key not in self:
            self[key] = _shallowcopy(value)
        else:
            self[key].extend(value)
[ "def", "extend", "(", "self", ",", "other", ")", ":", "for", "key", ",", "value", "in", "other", ".", "iteritems", "(", ")", ":", "if", "key", "not", "in", "self", ":", "self", "[", "key", "]", "=", "_shallowcopy", "(", "value", ")", "else", ":", "self", "[", "key", "]", ".", "extend", "(", "value", ")" ]
Appends the segmentlists from other to the corresponding segmentlists in self, adding new segmentslists to self as needed.
[ "Appends", "the", "segmentlists", "from", "other", "to", "the", "corresponding", "segmentlists", "in", "self", "adding", "new", "segmentslists", "to", "self", "as", "needed", "." ]
python
train
inspirehep/harvesting-kit
harvestingkit/oup_package.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/oup_package.py#L197-L225
def _extract_packages(self):
    """
    Extract a package in a new directory.
    """
    if not hasattr(self, "retrieved_packages_unpacked"):
        self.retrieved_packages_unpacked = [self.package_name]
    for path in self.retrieved_packages_unpacked:
        package_name = basename(path)
        self.path_unpacked = join(CFG_UNPACKED_FILES, package_name.split('.')[0])
        self.logger.debug("Extracting package: %s" % (path.split("/")[-1],))
        try:
            if "_archival_pdf" in self.path_unpacked:
                self.path_unpacked = (self.path_unpacked
                                      .rstrip("_archival_pdf"))
                ZipFile(path).extractall(join(self.path_unpacked, "archival_pdfs"))
            else:
                ZipFile(path).extractall(self.path_unpacked)
            #TarFile.open(path).extractall(self.path_unpacked)
        except Exception:
            register_exception(alert_admin=True, prefix="OUP error extracting package.")
            self.logger.error("Error extraction package file: %s" % (path,))

    if hasattr(self, "path_unpacked"):
        return self.path_unpacked
[ "def", "_extract_packages", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"retrieved_packages_unpacked\"", ")", ":", "self", ".", "retrieved_packages_unpacked", "=", "[", "self", ".", "package_name", "]", "for", "path", "in", "self", ".", "retrieved_packages_unpacked", ":", "package_name", "=", "basename", "(", "path", ")", "self", ".", "path_unpacked", "=", "join", "(", "CFG_UNPACKED_FILES", ",", "package_name", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "self", ".", "logger", ".", "debug", "(", "\"Extracting package: %s\"", "%", "(", "path", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ",", ")", ")", "try", ":", "if", "\"_archival_pdf\"", "in", "self", ".", "path_unpacked", ":", "self", ".", "path_unpacked", "=", "(", "self", ".", "path_unpacked", ".", "rstrip", "(", "\"_archival_pdf\"", ")", ")", "ZipFile", "(", "path", ")", ".", "extractall", "(", "join", "(", "self", ".", "path_unpacked", ",", "\"archival_pdfs\"", ")", ")", "else", ":", "ZipFile", "(", "path", ")", ".", "extractall", "(", "self", ".", "path_unpacked", ")", "#TarFile.open(path).extractall(self.path_unpacked)", "except", "Exception", ":", "register_exception", "(", "alert_admin", "=", "True", ",", "prefix", "=", "\"OUP error extracting package.\"", ")", "self", ".", "logger", ".", "error", "(", "\"Error extraction package file: %s\"", "%", "(", "path", ",", ")", ")", "if", "hasattr", "(", "self", ",", "\"path_unpacked\"", ")", ":", "return", "self", ".", "path_unpacked" ]
Extract a package in a new directory.
[ "Extract", "a", "package", "in", "a", "new", "directory", "." ]
python
valid
kyan001/PyKyanToolKit
KyanToolKit.py
https://github.com/kyan001/PyKyanToolKit/blob/a3974fcd45ce41f743b4a3d42af961fedea8fda8/KyanToolKit.py#L190-L211
def ajax(cls, url, param={}, method='get'):
    """Get info by ajax

    Args:
        url: string

    Returns:
        dict: json decoded into a dict
    """
    param = urllib.parse.urlencode(param)
    if method.lower() == 'get':
        req = urllib.request.Request(url + '?' + param)
    elif method.lower() == 'post':
        param = param.encode('utf-8')
        req = urllib.request.Request(url, data=param)
    else:
        raise Exception("invalid method '{}' (GET/POST)".format(method))
    rsp = urllib.request.urlopen(req)
    if rsp:
        rsp_json = rsp.read().decode('utf-8')
        rsp_dict = json.loads(rsp_json)
        return rsp_dict
    return None
[ "def", "ajax", "(", "cls", ",", "url", ",", "param", "=", "{", "}", ",", "method", "=", "'get'", ")", ":", "param", "=", "urllib", ".", "parse", ".", "urlencode", "(", "param", ")", "if", "method", ".", "lower", "(", ")", "==", "'get'", ":", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", "+", "'?'", "+", "param", ")", "elif", "method", ".", "lower", "(", ")", "==", "'post'", ":", "param", "=", "param", ".", "encode", "(", "'utf-8'", ")", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", ",", "data", "=", "param", ")", "else", ":", "raise", "Exception", "(", "\"invalid method '{}' (GET/POST)\"", ".", "format", "(", "method", ")", ")", "rsp", "=", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "if", "rsp", ":", "rsp_json", "=", "rsp", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "rsp_dict", "=", "json", ".", "loads", "(", "rsp_json", ")", "return", "rsp_dict", "return", "None" ]
Get info by ajax Args: url: string Returns: dict: json decoded into a dict
[ "Get", "info", "by", "ajax" ]
python
train
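A short usage sketch for the ajax helper above. The class and module names are assumptions about how KyanToolKit.py exposes it, the endpoint is a placeholder, and the call performs a real HTTP request, so treat this as illustrative only:

from KyanToolKit import KyanToolKit  # assumed class/module layout

# GET request against a hypothetical JSON endpoint
data = KyanToolKit.ajax('https://httpbin.org/get', {'q': 'test'}, method='get')
print(data.get('args'))  # the decoded JSON dict returned by the helper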
C-Pro/pgdocgen
pgdocgen/ddlobject/schema.py
https://github.com/C-Pro/pgdocgen/blob/b5d95c1bc1b38e3c7977aeddc20793a7b0f5d0fe/pgdocgen/ddlobject/schema.py#L12-L40
def read_contents(self, name, conn):
    '''Read schema tables'''
    sql = '''select c.relname,
                    d.description,
                    case c.relkind
                      when 'r' then 'table'
                      when 'v' then 'view'
                      when 'm' then 'materialized view'
                      when 'f' then 'foreign table'
                    end as table_type
               from pg_catalog.pg_class c
               join pg_catalog.pg_namespace n on n.oid = c.relnamespace
               left join pg_catalog.pg_description d on (d.objoid = c.oid)
              where c.relkind in ('r','v','m','f') and
                    n.nspname = %s and
                    n.nspname not like 'pg\_%%' and
                    n.nspname not in ('information_schema') and
                    coalesce(d.objsubid,0) = 0
              order by c.relname'''
    log = get_logger()
    cur = conn.cursor()
    cur.execute(sql, [name])
    tables = cur.fetchall()
    from pgdocgen.ddlobject.table import Table
    for table in tables:
        table_obj = Table(name, table[0], table[1], table[2], conn)
        log.debug('{}: {}'.format(table[0], table[1]))
        self.contents.append(copy.deepcopy(table_obj))
    cur.close()
[ "def", "read_contents", "(", "self", ",", "name", ",", "conn", ")", ":", "sql", "=", "'''select c.relname,\n d.description,\n case c.relkind\n when 'r' then 'table'\n when 'v' then 'view'\n when 'm' then 'materialized view'\n when 'f' then 'foreign table'\n end as table_type\n from pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n left join pg_catalog.pg_description d on (d.objoid = c.oid)\n where c.relkind in ('r','v','m','f') and\n n.nspname = %s and\n n.nspname not like 'pg\\_%%' and\n n.nspname not in ('information_schema') and\n coalesce(d.objsubid,0) = 0\n order by c.relname'''", "log", "=", "get_logger", "(", ")", "cur", "=", "conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "sql", ",", "[", "name", "]", ")", "tables", "=", "cur", ".", "fetchall", "(", ")", "from", "pgdocgen", ".", "ddlobject", ".", "table", "import", "Table", "for", "table", "in", "tables", ":", "table_obj", "=", "Table", "(", "name", ",", "table", "[", "0", "]", ",", "table", "[", "1", "]", ",", "table", "[", "2", "]", ",", "conn", ")", "log", ".", "debug", "(", "'{}: {}'", ".", "format", "(", "table", "[", "0", "]", ",", "table", "[", "1", "]", ")", ")", "self", ".", "contents", ".", "append", "(", "copy", ".", "deepcopy", "(", "table_obj", ")", ")", "cur", ".", "close", "(", ")" ]
Read schema tables
[ "Read", "schema", "tables" ]
python
train
mozilla-releng/mozapkpublisher
mozapkpublisher/common/store_l10n.py
https://github.com/mozilla-releng/mozapkpublisher/blob/df61034220153cbb98da74c8ef6de637f9185e12/mozapkpublisher/common/store_l10n.py#L107-L112
def _get_list_of_completed_locales(product, channel):
    """ Get all the translated locales supported by Google play
    So, locale unsupported by Google play won't be downloaded
    Idem for not translated locale
    """
    return utils.load_json_url(_ALL_LOCALES_URL.format(product=product, channel=channel))
[ "def", "_get_list_of_completed_locales", "(", "product", ",", "channel", ")", ":", "return", "utils", ".", "load_json_url", "(", "_ALL_LOCALES_URL", ".", "format", "(", "product", "=", "product", ",", "channel", "=", "channel", ")", ")" ]
Get all the translated locales supported by Google play So, locale unsupported by Google play won't be downloaded Idem for not translated locale
[ "Get", "all", "the", "translated", "locales", "supported", "by", "Google", "play", "So", "locale", "unsupported", "by", "Google", "play", "won", "t", "be", "downloaded", "Idem", "for", "not", "translated", "locale" ]
python
train
bukun/TorCMS
ext_script/autocrud/func_gen_html.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/ext_script/autocrud/func_gen_html.py#L118-L132
def gen_radio_list(sig_dic):
    '''
    For generating List view HTML file for RADIO.
    for each item.
    '''
    view_zuoxiang = '''<span class="iga_pd_val">'''
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '''{{% if postinfo.extinfo['{0}'][0] == "{1}" %}} {2} {{% end %}}
        '''.format(sig_dic['en'], key, dic_tmp[key])
        view_zuoxiang += tmp_str
    view_zuoxiang += '''</span>'''
    return view_zuoxiang
[ "def", "gen_radio_list", "(", "sig_dic", ")", ":", "view_zuoxiang", "=", "'''<span class=\"iga_pd_val\">'''", "dic_tmp", "=", "sig_dic", "[", "'dic'", "]", "for", "key", "in", "dic_tmp", ".", "keys", "(", ")", ":", "tmp_str", "=", "'''{{% if postinfo.extinfo['{0}'][0] == \"{1}\" %}} {2} {{% end %}}\n '''", ".", "format", "(", "sig_dic", "[", "'en'", "]", ",", "key", ",", "dic_tmp", "[", "key", "]", ")", "view_zuoxiang", "+=", "tmp_str", "view_zuoxiang", "+=", "'''</span>'''", "return", "view_zuoxiang" ]
For generating List view HTML file for RADIO. for each item.
[ "For", "generating", "List", "view", "HTML", "file", "for", "RADIO", ".", "for", "each", "item", "." ]
python
train
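The generator above only needs a dict with 'en' and 'dic' keys, so it can be exercised standalone; the field name and options below are made up for illustration:

sig_dic = {'en': 'color', 'dic': {'red': 'Red', 'blue': 'Blue'}}  # hypothetical field definition
html = gen_radio_list(sig_dic)
print(html)  # a <span> wrapping one Tornado-style {% if %} block per option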
mozilla-releng/scriptworker
scriptworker/cot/verify.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/cot/verify.py#L1890-L1907
async def verify_docker_worker_task(chain, link):
    """Docker-worker specific checks.

    Args:
        chain (ChainOfTrust): the chain we're operating on
        link (ChainOfTrust or LinkOfTrust): the trust object for the signing task.

    Raises:
        CoTError: on failure.

    """
    if chain != link:
        # These two checks will die on `link.cot` if `link` is a ChainOfTrust
        # object (e.g., the task we're running `verify_cot` against is a
        # docker-worker task). So only run these tests if they are not the chain
        # object.
        check_interactive_docker_worker(link)
        verify_docker_image_sha(chain, link)
[ "async", "def", "verify_docker_worker_task", "(", "chain", ",", "link", ")", ":", "if", "chain", "!=", "link", ":", "# These two checks will die on `link.cot` if `link` is a ChainOfTrust", "# object (e.g., the task we're running `verify_cot` against is a", "# docker-worker task). So only run these tests if they are not the chain", "# object.", "check_interactive_docker_worker", "(", "link", ")", "verify_docker_image_sha", "(", "chain", ",", "link", ")" ]
Docker-worker specific checks. Args: chain (ChainOfTrust): the chain we're operating on link (ChainOfTrust or LinkOfTrust): the trust object for the signing task. Raises: CoTError: on failure.
[ "Docker", "-", "worker", "specific", "checks", "." ]
python
train
nficano/python-lambda
aws_lambda/aws_lambda.py
https://github.com/nficano/python-lambda/blob/b0bd25404df70212d7fa057758760366406d64f2/aws_lambda/aws_lambda.py#L400-L421
def _install_packages(path, packages):
    """Install all packages listed to the target directory.

    Ignores any package that includes Python itself and python-lambda as well
    since its only needed for deploying and not running the code

    :param str path:
        Path to copy installed pip packages to.
    :param list packages:
        A list of packages to be installed via pip.
    """
    def _filter_blacklist(package):
        blacklist = ['-i', '#', 'Python==', 'python-lambda==']
        return all(package.startswith(entry) is False for entry in blacklist)
    filtered_packages = filter(_filter_blacklist, packages)
    for package in filtered_packages:
        if package.startswith('-e '):
            package = package.replace('-e ', '')

        print('Installing {package}'.format(package=package))
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-t', path,
                               '--ignore-installed'])
    print ('Install directory contents are now: {directory}'.format(directory=os.listdir(path)))
[ "def", "_install_packages", "(", "path", ",", "packages", ")", ":", "def", "_filter_blacklist", "(", "package", ")", ":", "blacklist", "=", "[", "'-i'", ",", "'#'", ",", "'Python=='", ",", "'python-lambda=='", "]", "return", "all", "(", "package", ".", "startswith", "(", "entry", ")", "is", "False", "for", "entry", "in", "blacklist", ")", "filtered_packages", "=", "filter", "(", "_filter_blacklist", ",", "packages", ")", "for", "package", "in", "filtered_packages", ":", "if", "package", ".", "startswith", "(", "'-e '", ")", ":", "package", "=", "package", ".", "replace", "(", "'-e '", ",", "''", ")", "print", "(", "'Installing {package}'", ".", "format", "(", "package", "=", "package", ")", ")", "subprocess", ".", "check_call", "(", "[", "sys", ".", "executable", ",", "'-m'", ",", "'pip'", ",", "'install'", ",", "package", ",", "'-t'", ",", "path", ",", "'--ignore-installed'", "]", ")", "print", "(", "'Install directory contents are now: {directory}'", ".", "format", "(", "directory", "=", "os", ".", "listdir", "(", "path", ")", ")", ")" ]
Install all packages listed to the target directory. Ignores any package that includes Python itself and python-lambda as well since its only needed for deploying and not running the code :param str path: Path to copy installed pip packages to. :param list packages: A list of packages to be installed via pip.
[ "Install", "all", "packages", "listed", "to", "the", "target", "directory", "." ]
python
valid
wummel/linkchecker
third_party/dnspython/dns/opcode.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/opcode.py#L45-L61
def from_text(text):
    """Convert text into an opcode.

    @param text: the textual opcode
    @type text: string
    @raises UnknownOpcode: the opcode is unknown
    @rtype: int
    """
    if text.isdigit():
        value = int(text)
        if value >= 0 and value <= 15:
            return value
    value = _by_text.get(text.upper())
    if value is None:
        raise UnknownOpcode
    return value
[ "def", "from_text", "(", "text", ")", ":", "if", "text", ".", "isdigit", "(", ")", ":", "value", "=", "int", "(", "text", ")", "if", "value", ">=", "0", "and", "value", "<=", "15", ":", "return", "value", "value", "=", "_by_text", ".", "get", "(", "text", ".", "upper", "(", ")", ")", "if", "value", "is", "None", ":", "raise", "UnknownOpcode", "return", "value" ]
Convert text into an opcode. @param text: the textual opcode @type text: string @raises UnknownOpcode: the opcode is unknown @rtype: int
[ "Convert", "text", "into", "an", "opcode", "." ]
python
train
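A usage sketch, assuming dns.opcode is importable from dnspython: numeric strings in the range 0-15 pass straight through, and names are looked up case-insensitively.

import dns.opcode

print(dns.opcode.from_text('5'))      # 5 (numeric opcodes 0-15 are returned as ints)
print(dns.opcode.from_text('query'))  # 0, the standard QUERY opcode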
awslabs/sockeye
sockeye/model.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/model.py#L209-L249
def _get_embed_weights(self, prefix: str) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol]:
    """
    Returns embedding parameters for source and target.
    When source and target embeddings are shared, they are created here and passed in to each side,
    instead of being created in the Embedding constructors.

    :param prefix: Prefix.
    :return: Tuple of source and target parameter symbols.
    """
    w_embed_source = mx.sym.Variable(prefix + C.SOURCE_EMBEDDING_PREFIX + "weight",
                                     shape=(self.config.config_embed_source.vocab_size,
                                            self.config.config_embed_source.num_embed))
    w_embed_target = mx.sym.Variable(prefix + C.TARGET_EMBEDDING_PREFIX + "weight",
                                     shape=(self.config.config_embed_target.vocab_size,
                                            self.config.config_embed_target.num_embed))
    w_out_target = mx.sym.Variable(prefix + "target_output_weight",
                                   dtype='float32',
                                   shape=(self.config.vocab_target_size, self.decoder.get_num_hidden()))

    if self.config.weight_tying:
        if C.WEIGHT_TYING_SRC in self.config.weight_tying_type \
                and C.WEIGHT_TYING_TRG in self.config.weight_tying_type:
            logger.info("Tying the source and target embeddings.")
            w_embed_source = w_embed_target = mx.sym.Variable(prefix + C.SHARED_EMBEDDING_PREFIX + "weight",
                                                              shape=(self.config.config_embed_source.vocab_size,
                                                                     self.config.config_embed_source.num_embed))

        if C.WEIGHT_TYING_SOFTMAX in self.config.weight_tying_type:
            logger.info("Tying the target embeddings and output layer parameters.")
            utils.check_condition(self.config.config_embed_target.num_embed == self.decoder.get_num_hidden(),
                                  "Weight tying requires target embedding size and decoder hidden size " +
                                  "to be equal: %d vs. %d" % (self.config.config_embed_target.num_embed,
                                                              self.decoder.get_num_hidden()))
            w_out_target = w_embed_target

    self._embed_weight_source_name = None
    if w_embed_source is not None:
        self._embed_weight_source_name = w_embed_source.name
    self._embed_weight_target_name = w_embed_target.name
    self._out_weight_target_name = w_out_target.name
    return w_embed_source, w_embed_target, w_out_target
[ "def", "_get_embed_weights", "(", "self", ",", "prefix", ":", "str", ")", "->", "Tuple", "[", "mx", ".", "sym", ".", "Symbol", ",", "mx", ".", "sym", ".", "Symbol", ",", "mx", ".", "sym", ".", "Symbol", "]", ":", "w_embed_source", "=", "mx", ".", "sym", ".", "Variable", "(", "prefix", "+", "C", ".", "SOURCE_EMBEDDING_PREFIX", "+", "\"weight\"", ",", "shape", "=", "(", "self", ".", "config", ".", "config_embed_source", ".", "vocab_size", ",", "self", ".", "config", ".", "config_embed_source", ".", "num_embed", ")", ")", "w_embed_target", "=", "mx", ".", "sym", ".", "Variable", "(", "prefix", "+", "C", ".", "TARGET_EMBEDDING_PREFIX", "+", "\"weight\"", ",", "shape", "=", "(", "self", ".", "config", ".", "config_embed_target", ".", "vocab_size", ",", "self", ".", "config", ".", "config_embed_target", ".", "num_embed", ")", ")", "w_out_target", "=", "mx", ".", "sym", ".", "Variable", "(", "prefix", "+", "\"target_output_weight\"", ",", "dtype", "=", "'float32'", ",", "shape", "=", "(", "self", ".", "config", ".", "vocab_target_size", ",", "self", ".", "decoder", ".", "get_num_hidden", "(", ")", ")", ")", "if", "self", ".", "config", ".", "weight_tying", ":", "if", "C", ".", "WEIGHT_TYING_SRC", "in", "self", ".", "config", ".", "weight_tying_type", "and", "C", ".", "WEIGHT_TYING_TRG", "in", "self", ".", "config", ".", "weight_tying_type", ":", "logger", ".", "info", "(", "\"Tying the source and target embeddings.\"", ")", "w_embed_source", "=", "w_embed_target", "=", "mx", ".", "sym", ".", "Variable", "(", "prefix", "+", "C", ".", "SHARED_EMBEDDING_PREFIX", "+", "\"weight\"", ",", "shape", "=", "(", "self", ".", "config", ".", "config_embed_source", ".", "vocab_size", ",", "self", ".", "config", ".", "config_embed_source", ".", "num_embed", ")", ")", "if", "C", ".", "WEIGHT_TYING_SOFTMAX", "in", "self", ".", "config", ".", "weight_tying_type", ":", "logger", ".", "info", "(", "\"Tying the target embeddings and output layer parameters.\"", ")", "utils", ".", "check_condition", "(", "self", ".", "config", ".", "config_embed_target", ".", "num_embed", "==", "self", ".", "decoder", ".", "get_num_hidden", "(", ")", ",", "\"Weight tying requires target embedding size and decoder hidden size \"", "+", "\"to be equal: %d vs. %d\"", "%", "(", "self", ".", "config", ".", "config_embed_target", ".", "num_embed", ",", "self", ".", "decoder", ".", "get_num_hidden", "(", ")", ")", ")", "w_out_target", "=", "w_embed_target", "self", ".", "_embed_weight_source_name", "=", "None", "if", "w_embed_source", "is", "not", "None", ":", "self", ".", "_embed_weight_source_name", "=", "w_embed_source", ".", "name", "self", ".", "_embed_weight_target_name", "=", "w_embed_target", ".", "name", "self", ".", "_out_weight_target_name", "=", "w_out_target", ".", "name", "return", "w_embed_source", ",", "w_embed_target", ",", "w_out_target" ]
Returns embedding parameters for source and target. When source and target embeddings are shared, they are created here and passed in to each side, instead of being created in the Embedding constructors. :param prefix: Prefix. :return: Tuple of source and target parameter symbols.
[ "Returns", "embedding", "parameters", "for", "source", "and", "target", ".", "When", "source", "and", "target", "embeddings", "are", "shared", "they", "are", "created", "here", "and", "passed", "in", "to", "each", "side", "instead", "of", "being", "created", "in", "the", "Embedding", "constructors", "." ]
python
train
sanoma/django-arctic
arctic/mixins.py
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/mixins.py#L393-L401
def _split_str(self, field):
    """
    Split title|7 into (title, 7)
    """
    field_items = field.split("|")
    if len(field_items) == 2:
        return field_items[0], field_items[1]
    elif len(field_items) == 1:
        return field_items[0], None
[ "def", "_split_str", "(", "self", ",", "field", ")", ":", "field_items", "=", "field", ".", "split", "(", "\"|\"", ")", "if", "len", "(", "field_items", ")", "==", "2", ":", "return", "field_items", "[", "0", "]", ",", "field_items", "[", "1", "]", "elif", "len", "(", "field_items", ")", "==", "1", ":", "return", "field_items", "[", "0", "]", ",", "None" ]
Split title|7 into (title, 7)
[ "Split", "title|7", "into", "(", "title", "7", ")" ]
python
train
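Since the helper above only calls str.split, its behaviour is easy to illustrate; here it is invoked as a plain function, with None standing in for the unused self argument:

print(_split_str(None, 'title|7'))  # ('title', '7')
print(_split_str(None, 'title'))    # ('title', None)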
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L2335-L2374
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
    """Operation: Activate Logical Partition (requires classic mode)."""
    assert wait_for_completion is True  # async not supported yet
    lpar_oid = uri_parms[0]
    lpar_uri = '/api/logical-partitions/' + lpar_oid
    try:
        lpar = hmc.lookup_by_uri(lpar_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    cpc = lpar.manager.parent
    assert not cpc.dpm_enabled

    status = lpar.properties.get('status', None)
    force = body.get('force', False) if body else False
    if status == 'operating' and not force:
        raise ServerError(method, uri, reason=263,
                          message="LPAR {!r} could not be activated "
                          "because the LPAR is in status {} "
                          "(and force was not specified).".
                          format(lpar.name, status))

    act_profile_name = body.get('activation-profile-name', None)
    if not act_profile_name:
        act_profile_name = lpar.properties.get(
            'next-activation-profile-name', None)
    if act_profile_name is None:
        act_profile_name = ''

    # Perform the check between LPAR name and profile name
    if act_profile_name != lpar.name:
        raise ServerError(method, uri, reason=263,
                          message="LPAR {!r} could not be activated "
                          "because the name of the image activation "
                          "profile {!r} is different from the LPAR name.".
                          format(lpar.name, act_profile_name))

    # Reflect the activation in the resource
    lpar.properties['status'] = LparActivateHandler.get_status()
    lpar.properties['last-used-activation-profile'] = act_profile_name
[ "def", "post", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "body", ",", "logon_required", ",", "wait_for_completion", ")", ":", "assert", "wait_for_completion", "is", "True", "# async not supported yet", "lpar_oid", "=", "uri_parms", "[", "0", "]", "lpar_uri", "=", "'/api/logical-partitions/'", "+", "lpar_oid", "try", ":", "lpar", "=", "hmc", ".", "lookup_by_uri", "(", "lpar_uri", ")", "except", "KeyError", ":", "raise", "InvalidResourceError", "(", "method", ",", "uri", ")", "cpc", "=", "lpar", ".", "manager", ".", "parent", "assert", "not", "cpc", ".", "dpm_enabled", "status", "=", "lpar", ".", "properties", ".", "get", "(", "'status'", ",", "None", ")", "force", "=", "body", ".", "get", "(", "'force'", ",", "False", ")", "if", "body", "else", "False", "if", "status", "==", "'operating'", "and", "not", "force", ":", "raise", "ServerError", "(", "method", ",", "uri", ",", "reason", "=", "263", ",", "message", "=", "\"LPAR {!r} could not be activated \"", "\"because the LPAR is in status {} \"", "\"(and force was not specified).\"", ".", "format", "(", "lpar", ".", "name", ",", "status", ")", ")", "act_profile_name", "=", "body", ".", "get", "(", "'activation-profile-name'", ",", "None", ")", "if", "not", "act_profile_name", ":", "act_profile_name", "=", "lpar", ".", "properties", ".", "get", "(", "'next-activation-profile-name'", ",", "None", ")", "if", "act_profile_name", "is", "None", ":", "act_profile_name", "=", "''", "# Perform the check between LPAR name and profile name", "if", "act_profile_name", "!=", "lpar", ".", "name", ":", "raise", "ServerError", "(", "method", ",", "uri", ",", "reason", "=", "263", ",", "message", "=", "\"LPAR {!r} could not be activated \"", "\"because the name of the image activation \"", "\"profile {!r} is different from the LPAR name.\"", ".", "format", "(", "lpar", ".", "name", ",", "act_profile_name", ")", ")", "# Reflect the activation in the resource", "lpar", ".", "properties", "[", "'status'", "]", "=", "LparActivateHandler", ".", "get_status", "(", ")", "lpar", ".", "properties", "[", "'last-used-activation-profile'", "]", "=", "act_profile_name" ]
Operation: Activate Logical Partition (requires classic mode).
[ "Operation", ":", "Activate", "Logical", "Partition", "(", "requires", "classic", "mode", ")", "." ]
python
train
SiLab-Bonn/basil
basil/TL/Visa.py
https://github.com/SiLab-Bonn/basil/blob/99052482d9334dd1f5598eb2d2fb4d5399a32291/basil/TL/Visa.py#L26-L38
def init(self):
    '''
    Initialize the device.
    Parameters of visa.ResourceManager().open_resource()
    '''
    super(Visa, self).init()
    backend = self._init.get('backend', '')  # Empty string means std. backend (NI VISA)
    rm = visa.ResourceManager(backend)
    try:
        logger.info('BASIL VISA TL with %s backend found the following devices: %s', backend,
                    ", ".join(rm.list_resources()))
    except NotImplementedError:  # some backends do not always implement the list_resources function
        logger.info('BASIL VISA TL with %s backend', backend)
    self._resource = rm.open_resource(**{key: value for key, value in self._init.items()
                                         if key not in ("backend",)})
[ "def", "init", "(", "self", ")", ":", "super", "(", "Visa", ",", "self", ")", ".", "init", "(", ")", "backend", "=", "self", ".", "_init", ".", "get", "(", "'backend'", ",", "''", ")", "# Empty string means std. backend (NI VISA)", "rm", "=", "visa", ".", "ResourceManager", "(", "backend", ")", "try", ":", "logger", ".", "info", "(", "'BASIL VISA TL with %s backend found the following devices: %s'", ",", "backend", ",", "\", \"", ".", "join", "(", "rm", ".", "list_resources", "(", ")", ")", ")", "except", "NotImplementedError", ":", "# some backends do not always implement the list_resources function", "logger", ".", "info", "(", "'BASIL VISA TL with %s backend'", ",", "backend", ")", "self", ".", "_resource", "=", "rm", ".", "open_resource", "(", "*", "*", "{", "key", ":", "value", "for", "key", ",", "value", "in", "self", ".", "_init", ".", "items", "(", ")", "if", "key", "not", "in", "(", "\"backend\"", ",", ")", "}", ")" ]
Initialize the device. Parameters of visa.ResourceManager().open_resource()
[ "Initialize", "the", "device", ".", "Parameters", "of", "visa", ".", "ResourceManager", "()", ".", "open_resource", "()" ]
python
train
common-workflow-language/cwltool
cwltool/main.py
https://github.com/common-workflow-language/cwltool/blob/cb81b22abc52838823da9945f04d06739ab32fda/cwltool/main.py#L453-L462
def print_pack(document_loader,  # type: Loader
               processobj,       # type: CommentedMap
               uri,              # type: Text
               metadata          # type: Dict[Text, Any]
               ):  # type (...) -> Text
    """Return a CWL serialization of the CWL document in JSON."""
    packed = pack(document_loader, processobj, uri, metadata)
    if len(packed["$graph"]) > 1:
        return json_dumps(packed, indent=4)
    return json_dumps(packed["$graph"][0], indent=4)
[ "def", "print_pack", "(", "document_loader", ",", "# type: Loader", "processobj", ",", "# type: CommentedMap", "uri", ",", "# type: Text", "metadata", "# type: Dict[Text, Any]", ")", ":", "# type (...) -> Text", "packed", "=", "pack", "(", "document_loader", ",", "processobj", ",", "uri", ",", "metadata", ")", "if", "len", "(", "packed", "[", "\"$graph\"", "]", ")", ">", "1", ":", "return", "json_dumps", "(", "packed", ",", "indent", "=", "4", ")", "return", "json_dumps", "(", "packed", "[", "\"$graph\"", "]", "[", "0", "]", ",", "indent", "=", "4", ")" ]
Return a CWL serialization of the CWL document in JSON.
[ "Return", "a", "CWL", "serialization", "of", "the", "CWL", "document", "in", "JSON", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/flask_master/app/app.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/flask_master/app/app.py#L94-L146
def root():
    """Home page."""
    return {
        "message": "Welcome to the SIP Master Controller (flask variant)",
        "_links": {
            "items": [
                {
                    "Link": "Health",
                    "href": "{}health".format(request.url)
                },
                {
                    "Link": "Version",
                    "href": "{}version".format(request.url)
                },
                {
                    "Link": "Allowed target states",
                    "href": "{}allowed_target_sdp_states".format(request.url)
                },
                {
                    "Link": "SDP state",
                    "href": "{}state".format(request.url)
                },
                {
                    "Link": "SDP target state",
                    "href": "{}state/target".format(request.url)
                },
                {
                    "Link": "SDP target state",
                    "href": "{}target_state".format(request.url)
                },
                {
                    "Link": "SDP current state",
                    "href": "{}state/current".format(request.url)
                },
                {
                    "Link": "Scheduling Block Instances",
                    "href": "{}scheduling_block_instances".format(request.url)
                },
                {
                    "Link": "Processing Blocks",
                    "href": "{}processing_blocks".format(request.url)
                },
                {
                    "Link": "Resource Availability",
                    "href": "{}resource_availability".format(request.url)
                },
                {
                    "Link": "Configure SBI",
                    "href": "{}configure_sbi".format(request.url)
                }
            ]
        }
    }
[ "def", "root", "(", ")", ":", "return", "{", "\"message\"", ":", "\"Welcome to the SIP Master Controller (flask variant)\"", ",", "\"_links\"", ":", "{", "\"items\"", ":", "[", "{", "\"Link\"", ":", "\"Health\"", ",", "\"href\"", ":", "\"{}health\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"Version\"", ",", "\"href\"", ":", "\"{}version\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"Allowed target states\"", ",", "\"href\"", ":", "\"{}allowed_target_sdp_states\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"SDP state\"", ",", "\"href\"", ":", "\"{}state\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"SDP target state\"", ",", "\"href\"", ":", "\"{}state/target\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"SDP target state\"", ",", "\"href\"", ":", "\"{}target_state\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"SDP current state\"", ",", "\"href\"", ":", "\"{}state/current\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"Scheduling Block Instances\"", ",", "\"href\"", ":", "\"{}scheduling_block_instances\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"Processing Blocks\"", ",", "\"href\"", ":", "\"{}processing_blocks\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"Resource Availability\"", ",", "\"href\"", ":", "\"{}resource_availability\"", ".", "format", "(", "request", ".", "url", ")", "}", ",", "{", "\"Link\"", ":", "\"Configure SBI\"", ",", "\"href\"", ":", "\"{}configure_sbi\"", ".", "format", "(", "request", ".", "url", ")", "}", "]", "}", "}" ]
Home page.
[ "Home", "page", "." ]
python
train
nameko/nameko
nameko/runners.py
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/runners.py#L54-L68
def start(self):
    """ Start all the registered services.

    A new container is created for each service using the container
    class provided in the __init__ method.

    All containers are started concurrently and the method will block
    until all have completed their startup routine.
    """
    service_names = ', '.join(self.service_names)
    _log.info('starting services: %s', service_names)

    SpawningProxy(self.containers).start()

    _log.debug('services started: %s', service_names)
[ "def", "start", "(", "self", ")", ":", "service_names", "=", "', '", ".", "join", "(", "self", ".", "service_names", ")", "_log", ".", "info", "(", "'starting services: %s'", ",", "service_names", ")", "SpawningProxy", "(", "self", ".", "containers", ")", ".", "start", "(", ")", "_log", ".", "debug", "(", "'services started: %s'", ",", "service_names", ")" ]
Start all the registered services. A new container is created for each service using the container class provided in the __init__ method. All containers are started concurrently and the method will block until all have completed their startup routine.
[ "Start", "all", "the", "registered", "services", "." ]
python
train
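A sketch of how the runner above is typically driven, based on nameko's documented ServiceRunner API; the service class and AMQP URI are placeholders:

from nameko.rpc import rpc
from nameko.runners import ServiceRunner

class GreetingService:
    name = 'greeting_service'

    @rpc
    def hello(self, name):
        return 'Hello, {}!'.format(name)

config = {'AMQP_URI': 'amqp://guest:guest@localhost'}  # placeholder broker URI
runner = ServiceRunner(config)
runner.add_service(GreetingService)
runner.start()  # blocks until every container has finished its startup routine
runner.stop()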
gem/oq-engine
openquake/hmtk/parsers/catalogue/csv_catalogue_parser.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/parsers/catalogue/csv_catalogue_parser.py#L130-L143
def _int_check(self, attribute_array, value, irow, key):
    '''Checks if value is valid integer, appends to array
    if valid, appends nan if not'''
    value = value.strip(' ')
    try:
        if value:
            attribute_array = np.hstack([attribute_array, int(value)])
        else:
            attribute_array = np.hstack([attribute_array, np.nan])
    except:
        msg = 'Input file format error at line: %d' % (irow + 2)
        msg += ' key: %s' % (key)
        raise ValueError(msg)
    return attribute_array
[ "def", "_int_check", "(", "self", ",", "attribute_array", ",", "value", ",", "irow", ",", "key", ")", ":", "value", "=", "value", ".", "strip", "(", "' '", ")", "try", ":", "if", "value", ":", "attribute_array", "=", "np", ".", "hstack", "(", "[", "attribute_array", ",", "int", "(", "value", ")", "]", ")", "else", ":", "attribute_array", "=", "np", ".", "hstack", "(", "[", "attribute_array", ",", "np", ".", "nan", "]", ")", "except", ":", "msg", "=", "'Input file format error at line: %d'", "%", "(", "irow", "+", "2", ")", "msg", "+=", "' key: %s'", "%", "(", "key", ")", "raise", "ValueError", "(", "msg", ")", "return", "attribute_array" ]
Checks if value is valid integer, appends to array if valid, appends nan if not
[ "Checks", "if", "value", "is", "valid", "integer", "appends", "to", "array", "if", "valid", "appends", "nan", "if", "not" ]
python
train
rigetti/pyquil
pyquil/api/_wavefunction_simulator.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_wavefunction_simulator.py#L85-L130
def expectation(self, prep_prog: Program,
                pauli_terms: Union[PauliSum, List[PauliTerm]],
                memory_map: Any = None) -> Union[float, np.ndarray]:
    """
    Calculate the expectation value of Pauli operators given a state prepared by prep_program.

    If ``pauli_terms`` is a ``PauliSum`` then the returned value is a single ``float``,
    otherwise the returned value is an array of values, one for each ``PauliTerm`` in the
    list.

    .. note:: If your program contains measurements or noisy gates, this method may not do what
        you want. If the execution of ``quil_program`` is **non-deterministic** then the final
        wavefunction from which the expectation value is calculated only represents a stochastically
        generated sample and the wavefunctions returned by *different* ``wavefunction`` calls *will
        generally be different*.

    :param prep_prog: A program that prepares the state on which we measure the expectation.
    :param pauli_terms: A Pauli representation of a quantum operator.
    :param memory_map: An assignment of classical registers to values, representing an initial state
                       for the QAM's classical memory.

                       This is expected to be of type Dict[str, List[Union[int, float]]],
                       where the keys are memory region names and the values are arrays of
                       initialization data.

                       For now, we also support input of type Dict[MemoryReference, Any],
                       but this is deprecated and will be removed in a future release.
    :return: Either a float or array floats depending on ``pauli_terms``.
    """
    is_pauli_sum = False
    if isinstance(pauli_terms, PauliSum):
        progs, coeffs = pauli_terms.get_programs()
        is_pauli_sum = True
    else:
        coeffs = np.array([pt.coefficient for pt in pauli_terms])
        progs = [pt.program for pt in pauli_terms]

    if memory_map is not None:
        prep_prog = self.augment_program_with_memory_values(prep_prog, memory_map)

    bare_results = self.connection._expectation(prep_prog, progs, random_seed=self.random_seed)
    results = coeffs * bare_results
    if is_pauli_sum:
        return np.sum(results)

    return results
[ "def", "expectation", "(", "self", ",", "prep_prog", ":", "Program", ",", "pauli_terms", ":", "Union", "[", "PauliSum", ",", "List", "[", "PauliTerm", "]", "]", ",", "memory_map", ":", "Any", "=", "None", ")", "->", "Union", "[", "float", ",", "np", ".", "ndarray", "]", ":", "is_pauli_sum", "=", "False", "if", "isinstance", "(", "pauli_terms", ",", "PauliSum", ")", ":", "progs", ",", "coeffs", "=", "pauli_terms", ".", "get_programs", "(", ")", "is_pauli_sum", "=", "True", "else", ":", "coeffs", "=", "np", ".", "array", "(", "[", "pt", ".", "coefficient", "for", "pt", "in", "pauli_terms", "]", ")", "progs", "=", "[", "pt", ".", "program", "for", "pt", "in", "pauli_terms", "]", "if", "memory_map", "is", "not", "None", ":", "prep_prog", "=", "self", ".", "augment_program_with_memory_values", "(", "prep_prog", ",", "memory_map", ")", "bare_results", "=", "self", ".", "connection", ".", "_expectation", "(", "prep_prog", ",", "progs", ",", "random_seed", "=", "self", ".", "random_seed", ")", "results", "=", "coeffs", "*", "bare_results", "if", "is_pauli_sum", ":", "return", "np", ".", "sum", "(", "results", ")", "return", "results" ]
Calculate the expectation value of Pauli operators given a state prepared by prep_program. If ``pauli_terms`` is a ``PauliSum`` then the returned value is a single ``float``, otherwise the returned value is an array of values, one for each ``PauliTerm`` in the list. .. note:: If your program contains measurements or noisy gates, this method may not do what you want. If the execution of ``quil_program`` is **non-deterministic** then the final wavefunction from which the expectation value is calculated only represents a stochastically generated sample and the wavefunctions returned by *different* ``wavefunction`` calls *will generally be different*. :param prep_prog: A program that prepares the state on which we measure the expectation. :param pauli_terms: A Pauli representation of a quantum operator. :param memory_map: An assignment of classical registers to values, representing an initial state for the QAM's classical memory. This is expected to be of type Dict[str, List[Union[int, float]]], where the keys are memory region names and the values are arrays of initialization data. For now, we also support input of type Dict[MemoryReference, Any], but this is deprecated and will be removed in a future release. :return: Either a float or array floats depending on ``pauli_terms``.
[ "Calculate", "the", "expectation", "value", "of", "Pauli", "operators", "given", "a", "state", "prepared", "by", "prep_program", "." ]
python
train
ch3pjw/junction
jcn/display_elements.py
https://github.com/ch3pjw/junction/blob/7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e/jcn/display_elements.py#L43-L53
def _do_crop(self, lines, width, height, x_crop, y_crop):
    '''Crops a list of strings to the specified width/height
    '''
    lines = crop_or_expand(
        lines, height, default=[self.fillchar * width],
        scheme=self._schemes[y_crop])
    for i, line in enumerate(lines):
        lines[i] = crop_or_expand(
            line, width, default=self.fillchar,
            scheme=self._schemes[x_crop])
    return lines
[ "def", "_do_crop", "(", "self", ",", "lines", ",", "width", ",", "height", ",", "x_crop", ",", "y_crop", ")", ":", "lines", "=", "crop_or_expand", "(", "lines", ",", "height", ",", "default", "=", "[", "self", ".", "fillchar", "*", "width", "]", ",", "scheme", "=", "self", ".", "_schemes", "[", "y_crop", "]", ")", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "lines", "[", "i", "]", "=", "crop_or_expand", "(", "line", ",", "width", ",", "default", "=", "self", ".", "fillchar", ",", "scheme", "=", "self", ".", "_schemes", "[", "x_crop", "]", ")", "return", "lines" ]
Crops a list of strings to the specified width/height
[ "Crops", "a", "list", "of", "strings", "to", "the", "specified", "width", "/", "height" ]
python
train
konstantint/pyliftover
pyliftover/intervaltree.py
https://github.com/konstantint/pyliftover/blob/5164eed9ae678ad0ddc164df8c2c5767e6a4b39f/pyliftover/intervaltree.py#L111-L135
def _query(self, x, result):
    '''
    Same as self.query, but uses a provided list to accumulate results into.
    '''
    if self.single_interval is None:    # Empty
        return
    elif self.single_interval != 0:     # Single interval, just check whether x is in it
        if self.single_interval[0] <= x < self.single_interval[1]:
            result.append(self.single_interval)
    elif x < self.center:               # Normal tree, query point to the left of center
        if self.left_subtree is not None:
            self.left_subtree._query(x, result)
        for int in self.mid_sorted_by_start:
            if int[0] <= x:
                result.append(int)
            else:
                break
    else:                               # Normal tree, query point to the right of center
        for int in self.mid_sorted_by_end:
            if int[1] > x:
                result.append(int)
            else:
                break
        if self.right_subtree is not None:
            self.right_subtree._query(x, result)
[ "def", "_query", "(", "self", ",", "x", ",", "result", ")", ":", "if", "self", ".", "single_interval", "is", "None", ":", "# Empty", "return", "elif", "self", ".", "single_interval", "!=", "0", ":", "# Single interval, just check whether x is in it", "if", "self", ".", "single_interval", "[", "0", "]", "<=", "x", "<", "self", ".", "single_interval", "[", "1", "]", ":", "result", ".", "append", "(", "self", ".", "single_interval", ")", "elif", "x", "<", "self", ".", "center", ":", "# Normal tree, query point to the left of center", "if", "self", ".", "left_subtree", "is", "not", "None", ":", "self", ".", "left_subtree", ".", "_query", "(", "x", ",", "result", ")", "for", "int", "in", "self", ".", "mid_sorted_by_start", ":", "if", "int", "[", "0", "]", "<=", "x", ":", "result", ".", "append", "(", "int", ")", "else", ":", "break", "else", ":", "# Normal tree, query point to the right of center", "for", "int", "in", "self", ".", "mid_sorted_by_end", ":", "if", "int", "[", "1", "]", ">", "x", ":", "result", ".", "append", "(", "int", ")", "else", ":", "break", "if", "self", ".", "right_subtree", "is", "not", "None", ":", "self", ".", "right_subtree", ".", "_query", "(", "x", ",", "result", ")" ]
Same as self.query, but uses a provided list to accumulate results into.
[ "Same", "as", "self", ".", "query", "but", "uses", "a", "provided", "list", "to", "accumulate", "results", "into", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/pwscf.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/pwscf.py#L173-L186
def as_dict(self):
    """
    Create a dictionary representation of a PWInput object

    Returns:
        dict
    """
    pwinput_dict = {'structure': self.structure.as_dict(),
                    'pseudo': self.pseudo,
                    'sections': self.sections,
                    'kpoints_mode': self.kpoints_mode,
                    'kpoints_grid': self.kpoints_grid,
                    'kpoints_shift': self.kpoints_shift}
    return pwinput_dict
[ "def", "as_dict", "(", "self", ")", ":", "pwinput_dict", "=", "{", "'structure'", ":", "self", ".", "structure", ".", "as_dict", "(", ")", ",", "'pseudo'", ":", "self", ".", "pseudo", ",", "'sections'", ":", "self", ".", "sections", ",", "'kpoints_mode'", ":", "self", ".", "kpoints_mode", ",", "'kpoints_grid'", ":", "self", ".", "kpoints_grid", ",", "'kpoints_shift'", ":", "self", ".", "kpoints_shift", "}", "return", "pwinput_dict" ]
Create a dictionary representation of a PWInput object Returns: dict
[ "Create", "a", "dictionary", "representation", "of", "a", "PWInput", "object", "Returns", ":", "dict" ]
python
train
spacetelescope/drizzlepac
drizzlepac/tweakback.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/tweakback.py#L351-L364
def extract_input_filenames(drzfile):
    """
    Generate a list of filenames from a drizzled image's header
    """
    data_kws = fits.getval(drzfile, 'd*data', ext=0, memmap=False)
    if len(data_kws) == 0:
        return None
    fnames = []
    for kw in data_kws.cards:
        f = kw.value.split('[')[0]
        if f not in fnames:
            fnames.append(f)

    return fnames
[ "def", "extract_input_filenames", "(", "drzfile", ")", ":", "data_kws", "=", "fits", ".", "getval", "(", "drzfile", ",", "'d*data'", ",", "ext", "=", "0", ",", "memmap", "=", "False", ")", "if", "len", "(", "data_kws", ")", "==", "0", ":", "return", "None", "fnames", "=", "[", "]", "for", "kw", "in", "data_kws", ".", "cards", ":", "f", "=", "kw", ".", "value", ".", "split", "(", "'['", ")", "[", "0", "]", "if", "f", "not", "in", "fnames", ":", "fnames", ".", "append", "(", "f", ")", "return", "fnames" ]
Generate a list of filenames from a drizzled image's header
[ "Generate", "a", "list", "of", "filenames", "from", "a", "drizzled", "image", "s", "header" ]
python
train
saltstack/salt
salt/utils/stringutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L182-L192
def to_bool(text):
    '''
    Convert the string name of a boolean to that boolean value.
    '''
    downcased_text = six.text_type(text).strip().lower()
    if downcased_text == 'false':
        return False
    elif downcased_text == 'true':
        return True
    return text
[ "def", "to_bool", "(", "text", ")", ":", "downcased_text", "=", "six", ".", "text_type", "(", "text", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "downcased_text", "==", "'false'", ":", "return", "False", "elif", "downcased_text", "==", "'true'", ":", "return", "True", "return", "text" ]
Convert the string name of a boolean to that boolean value.
[ "Convert", "the", "string", "name", "of", "a", "boolean", "to", "that", "boolean", "value", "." ]
python
train
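The conversion above is case-insensitive and leaves unrecognised values untouched, as a quick sketch shows:

print(to_bool('True'))     # True
print(to_bool(' FALSE '))  # False (stripped and lowercased before comparison)
print(to_bool('maybe'))    # 'maybe' is returned unchanged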
dstanek/snake-guice
snakeguice/modules.py
https://github.com/dstanek/snake-guice/blob/d20b62de3ee31e84119c801756398c35ed803fb3/snakeguice/modules.py#L23-L25
def install(self, binder, module):
    """Add another module's bindings to a binder."""
    ModuleAdapter(module, self._injector).configure(binder)
[ "def", "install", "(", "self", ",", "binder", ",", "module", ")", ":", "ModuleAdapter", "(", "module", ",", "self", ".", "_injector", ")", ".", "configure", "(", "binder", ")" ]
Add another module's bindings to a binder.
[ "Add", "another", "module", "s", "bindings", "to", "a", "binder", "." ]
python
train
cds-astro/mocpy
mocpy/moc/moc.py
https://github.com/cds-astro/mocpy/blob/09472cabe537f6bfdb049eeea64d3ea57b391c21/mocpy/moc/moc.py#L88-L119
def contains(self, ra, dec, keep_inside=True):
    """
    Returns a boolean mask array of the positions lying inside (or outside) the MOC instance.

    Parameters
    ----------
    ra : `astropy.units.Quantity`
        Right ascension array
    dec : `astropy.units.Quantity`
        Declination array
    keep_inside : bool, optional
        True by default. If so the mask describes coordinates lying inside the MOC. If ``keep_inside``
        is false, contains will return the mask of the coordinates lying outside the MOC.

    Returns
    -------
    array : `~np.ndarray`
        A mask boolean array
    """
    depth = self.max_order
    m = np.zeros(nside2npix(1 << depth), dtype=bool)

    pix_id = self._best_res_pixels()
    m[pix_id] = True

    if not keep_inside:
        m = np.logical_not(m)

    hp = HEALPix(nside=(1 << depth), order='nested')
    pix = hp.lonlat_to_healpix(ra, dec)

    return m[pix]
[ "def", "contains", "(", "self", ",", "ra", ",", "dec", ",", "keep_inside", "=", "True", ")", ":", "depth", "=", "self", ".", "max_order", "m", "=", "np", ".", "zeros", "(", "nside2npix", "(", "1", "<<", "depth", ")", ",", "dtype", "=", "bool", ")", "pix_id", "=", "self", ".", "_best_res_pixels", "(", ")", "m", "[", "pix_id", "]", "=", "True", "if", "not", "keep_inside", ":", "m", "=", "np", ".", "logical_not", "(", "m", ")", "hp", "=", "HEALPix", "(", "nside", "=", "(", "1", "<<", "depth", ")", ",", "order", "=", "'nested'", ")", "pix", "=", "hp", ".", "lonlat_to_healpix", "(", "ra", ",", "dec", ")", "return", "m", "[", "pix", "]" ]
Returns a boolean mask array of the positions lying inside (or outside) the MOC instance. Parameters ---------- ra : `astropy.units.Quantity` Right ascension array dec : `astropy.units.Quantity` Declination array keep_inside : bool, optional True by default. If so the mask describes coordinates lying inside the MOC. If ``keep_inside`` is false, contains will return the mask of the coordinates lying outside the MOC. Returns ------- array : `~np.ndarray` A mask boolean array
[ "Returns", "a", "boolean", "mask", "array", "of", "the", "positions", "lying", "inside", "(", "or", "outside", ")", "the", "MOC", "instance", "." ]
python
train
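The core of `contains` is a boolean lookup table over every HEALPix pixel at the MOC's depth, indexed by the pixel each queried coordinate falls in. A self-contained toy illustration of that masking idea, with made-up pixel numbers standing in for the `astropy_healpix` lon/lat-to-pixel conversion used by the real method:

```python
import numpy as np

n_pix = 12 * 4 ** 3                     # number of HEALPix pixels at order 3
moc_pixels = np.array([5, 6, 7, 100])   # hypothetical pixels covered by the MOC

mask = np.zeros(n_pix, dtype=bool)
mask[moc_pixels] = True                 # keep_inside=True; negate for keep_inside=False

query_pixels = np.array([6, 200, 100])  # pixels of the queried (ra, dec) positions
print(mask[query_pixels])               # -> [ True False  True]
```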
xtrementl/focus
focus/plugin/modules/notify.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/notify.py#L12-L26
def _terminal_notifier(title, message): """ Shows user notification message via `terminal-notifier` command. `title` Notification title. `message` Notification message. """ try: paths = common.extract_app_paths(['terminal-notifier']) except ValueError: pass common.shell_process([paths[0], '-title', title, '-message', message])
[ "def", "_terminal_notifier", "(", "title", ",", "message", ")", ":", "try", ":", "paths", "=", "common", ".", "extract_app_paths", "(", "[", "'terminal-notifier'", "]", ")", "except", "ValueError", ":", "pass", "common", ".", "shell_process", "(", "[", "paths", "[", "0", "]", ",", "'-title'", ",", "title", ",", "'-message'", ",", "message", "]", ")" ]
Shows user notification message via `terminal-notifier` command. `title` Notification title. `message` Notification message.
[ "Shows", "user", "notification", "message", "via", "terminal", "-", "notifier", "command", "." ]
python
train
gabstopper/smc-python
smc/base/collection.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/collection.py#L541-L570
def first(self): """ Returns the first object matched or None if there is no matching object. :: >>> iterator = Host.objects.iterator() >>> c = iterator.filter('kali') >>> if c.exists(): >>> print(c.count()) >>> print(c.first()) 7 Host(name=kali67) If results are not needed and you only 1 result, this can be called from the CollectionManager:: >>> Host.objects.first() Host(name=SMC) :return: element or None """ if len(self): self._params.update(limit=1) if 'filter' not in self._params: return list(self)[0] else: # Filter may not return results result = list(self) if result: return result[0]
[ "def", "first", "(", "self", ")", ":", "if", "len", "(", "self", ")", ":", "self", ".", "_params", ".", "update", "(", "limit", "=", "1", ")", "if", "'filter'", "not", "in", "self", ".", "_params", ":", "return", "list", "(", "self", ")", "[", "0", "]", "else", ":", "# Filter may not return results", "result", "=", "list", "(", "self", ")", "if", "result", ":", "return", "result", "[", "0", "]" ]
Returns the first object matched or None if there is no matching object. :: >>> iterator = Host.objects.iterator() >>> c = iterator.filter('kali') >>> if c.exists(): >>> print(c.count()) >>> print(c.first()) 7 Host(name=kali67) If results are not needed and you only need 1 result, this can be called from the CollectionManager:: >>> Host.objects.first() Host(name=SMC) :return: element or None
[ "Returns", "the", "first", "object", "matched", "or", "None", "if", "there", "is", "no", "matching", "object", ".", "::", ">>>", "iterator", "=", "Host", ".", "objects", ".", "iterator", "()", ">>>", "c", "=", "iterator", ".", "filter", "(", "kali", ")", ">>>", "if", "c", ".", "exists", "()", ":", ">>>", "print", "(", "c", ".", "count", "()", ")", ">>>", "print", "(", "c", ".", "first", "()", ")", "7", "Host", "(", "name", "=", "kali67", ")", "If", "results", "are", "not", "needed", "and", "you", "only", "1", "result", "this", "can", "be", "called", "from", "the", "CollectionManager", "::", ">>>", "Host", ".", "objects", ".", "first", "()", "Host", "(", "name", "=", "SMC", ")", ":", "return", ":", "element", "or", "None" ]
python
train
ask/carrot
carrot/messaging.py
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L1008-L1015
def cancel(self): """Cancel a running :meth:`iterconsume` session.""" for consumer_tag in self._open_consumers.values(): try: self.backend.cancel(consumer_tag) except KeyError: pass self._open_consumers.clear()
[ "def", "cancel", "(", "self", ")", ":", "for", "consumer_tag", "in", "self", ".", "_open_consumers", ".", "values", "(", ")", ":", "try", ":", "self", ".", "backend", ".", "cancel", "(", "consumer_tag", ")", "except", "KeyError", ":", "pass", "self", ".", "_open_consumers", ".", "clear", "(", ")" ]
Cancel a running :meth:`iterconsume` session.
[ "Cancel", "a", "running", ":", "meth", ":", "iterconsume", "session", "." ]
python
train
simpleai-team/simpleai
simpleai/machine_learning/models.py
https://github.com/simpleai-team/simpleai/blob/2836befa7e970013f62e0ee75562652aacac6f65/simpleai/machine_learning/models.py#L76-L86
def load(cls, filepath): """ Loads a pickled version of the classifier saved in `filepath` """ with open(filepath, "rb") as filehandler: classifier = pickle.load(filehandler) if not isinstance(classifier, Classifier): raise ValueError("Pickled object is not a Classifier") return classifier
[ "def", "load", "(", "cls", ",", "filepath", ")", ":", "with", "open", "(", "filepath", ",", "\"rb\"", ")", "as", "filehandler", ":", "classifier", "=", "pickle", ".", "load", "(", "filehandler", ")", "if", "not", "isinstance", "(", "classifier", ",", "Classifier", ")", ":", "raise", "ValueError", "(", "\"Pickled object is not a Classifier\"", ")", "return", "classifier" ]
Loads a pickled version of the classifier saved in `filepath`
[ "Loads", "a", "pickled", "version", "of", "the", "classifier", "saved", "in", "filepath" ]
python
train
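A hedged usage sketch, assuming the import path follows the file layout above (`simpleai/machine_learning/models.py`) and that a previously pickled classifier already exists at the given path:

```python
from simpleai.machine_learning.models import Classifier  # assumed module path

classifier = Classifier.load('/tmp/my_classifier.pickle')  # unpickles the file
# load() raises ValueError if the pickled object is not a Classifier,
# so anything returned here is safe to treat as one.
print(type(classifier).__name__)
```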
petrjasek/eve-elastic
eve_elastic/elastic.py
https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/elastic.py#L118-L122
def default(self, value): """Convert mongo.ObjectId.""" if isinstance(value, ObjectId): return str(value) return super(ElasticJSONSerializer, self).default(value)
[ "def", "default", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "ObjectId", ")", ":", "return", "str", "(", "value", ")", "return", "super", "(", "ElasticJSONSerializer", ",", "self", ")", ".", "default", "(", "value", ")" ]
Convert mongo.ObjectId.
[ "Convert", "mongo", ".", "ObjectId", "." ]
python
train
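The same ObjectId-to-string fallback can be demonstrated with a plain `json.JSONEncoder` subclass. This sketch is not the library's own class, just the identical pattern, and it assumes `bson` (shipped with pymongo) is installed:

```python
import json
from bson import ObjectId

class MongoAwareEncoder(json.JSONEncoder):
    def default(self, value):
        if isinstance(value, ObjectId):
            return str(value)          # stringify ObjectIds
        return super().default(value)  # defer everything else to the base class

doc = {'_id': ObjectId(), 'name': 'item'}
print(json.dumps(doc, cls=MongoAwareEncoder))
```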
HazyResearch/fonduer
src/fonduer/parser/parser.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L655-L688
def _parse_section(self, node, state): """Parse a Section of the node. Note that this implementation currently creates a Section at the beginning of the document and creates Section based on tag of node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole. """ if node.tag not in ["html", "section"]: return state # Add a Section stable_id = ( f"{state['document'].name}" f"::" f"{'section'}" f":" f"{state['section']['idx']}" ) # Set name for Section name = node.attrib["name"] if "name" in node.attrib else None state["context"][node] = Section( document=state["document"], name=name, stable_id=stable_id, position=state["section"]["idx"], ) state["section"]["idx"] += 1 return state
[ "def", "_parse_section", "(", "self", ",", "node", ",", "state", ")", ":", "if", "node", ".", "tag", "not", "in", "[", "\"html\"", ",", "\"section\"", "]", ":", "return", "state", "# Add a Section", "stable_id", "=", "(", "f\"{state['document'].name}\"", "f\"::\"", "f\"{'section'}\"", "f\":\"", "f\"{state['section']['idx']}\"", ")", "# Set name for Section", "name", "=", "node", ".", "attrib", "[", "\"name\"", "]", "if", "\"name\"", "in", "node", ".", "attrib", "else", "None", "state", "[", "\"context\"", "]", "[", "node", "]", "=", "Section", "(", "document", "=", "state", "[", "\"document\"", "]", ",", "name", "=", "name", ",", "stable_id", "=", "stable_id", ",", "position", "=", "state", "[", "\"section\"", "]", "[", "\"idx\"", "]", ",", ")", "state", "[", "\"section\"", "]", "[", "\"idx\"", "]", "+=", "1", "return", "state" ]
Parse a Section of the node. Note that this implementation currently creates a Section at the beginning of the document and creates Section based on tag of node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole.
[ "Parse", "a", "Section", "of", "the", "node", "." ]
python
train
alfred82santa/aio-service-client
service_client/__init__.py
https://github.com/alfred82santa/aio-service-client/blob/dd9ad49e23067b22178534915aa23ba24f6ff39b/service_client/__init__.py#L222-L229
def close(self): """ Close service client and its plugins. """ self._execute_plugin_hooks_sync(hook='close') if not self.session.closed: ensure_future(self.session.close(), loop=self.loop)
[ "def", "close", "(", "self", ")", ":", "self", ".", "_execute_plugin_hooks_sync", "(", "hook", "=", "'close'", ")", "if", "not", "self", ".", "session", ".", "closed", ":", "ensure_future", "(", "self", ".", "session", ".", "close", "(", ")", ",", "loop", "=", "self", ".", "loop", ")" ]
Close service client and its plugins.
[ "Close", "service", "client", "and", "its", "plugins", "." ]
python
train
dropbox/pyannotate
pyannotate_tools/annotations/parse.py
https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_tools/annotations/parse.py#L173-L210
def tokenize(s): # type: (str) -> List[Token] """Translate a type comment into a list of tokens.""" original = s tokens = [] # type: List[Token] while True: if not s: tokens.append(End()) return tokens elif s[0] == ' ': s = s[1:] elif s[0] in '()[],*': tokens.append(Separator(s[0])) s = s[1:] elif s[:2] == '->': tokens.append(Separator('->')) s = s[2:] else: m = re.match(r'[-\w]+(\s*(\.|:)\s*[-/\w]*)*', s) if not m: raise ParseError(original) fullname = m.group(0) fullname = fullname.replace(' ', '') if fullname in TYPE_FIXUPS: fullname = TYPE_FIXUPS[fullname] # pytz creates classes with the name of the timezone being used: # https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123 # This causes pyannotates to crash as it's invalid to have a class # name with a `/` in it (e.g. "pytz.tzfile.America/Los_Angeles") if fullname.startswith('pytz.tzfile.'): fullname = 'datetime.tzinfo' if '-' in fullname or '/' in fullname: # Not a valid Python name; there are many places that # generate these, so we just substitute Any rather # than crashing. fullname = 'Any' tokens.append(DottedName(fullname)) s = s[len(m.group(0)):]
[ "def", "tokenize", "(", "s", ")", ":", "# type: (str) -> List[Token]", "original", "=", "s", "tokens", "=", "[", "]", "# type: List[Token]", "while", "True", ":", "if", "not", "s", ":", "tokens", ".", "append", "(", "End", "(", ")", ")", "return", "tokens", "elif", "s", "[", "0", "]", "==", "' '", ":", "s", "=", "s", "[", "1", ":", "]", "elif", "s", "[", "0", "]", "in", "'()[],*'", ":", "tokens", ".", "append", "(", "Separator", "(", "s", "[", "0", "]", ")", ")", "s", "=", "s", "[", "1", ":", "]", "elif", "s", "[", ":", "2", "]", "==", "'->'", ":", "tokens", ".", "append", "(", "Separator", "(", "'->'", ")", ")", "s", "=", "s", "[", "2", ":", "]", "else", ":", "m", "=", "re", ".", "match", "(", "r'[-\\w]+(\\s*(\\.|:)\\s*[-/\\w]*)*'", ",", "s", ")", "if", "not", "m", ":", "raise", "ParseError", "(", "original", ")", "fullname", "=", "m", ".", "group", "(", "0", ")", "fullname", "=", "fullname", ".", "replace", "(", "' '", ",", "''", ")", "if", "fullname", "in", "TYPE_FIXUPS", ":", "fullname", "=", "TYPE_FIXUPS", "[", "fullname", "]", "# pytz creates classes with the name of the timezone being used:", "# https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123", "# This causes pyannotates to crash as it's invalid to have a class", "# name with a `/` in it (e.g. \"pytz.tzfile.America/Los_Angeles\")", "if", "fullname", ".", "startswith", "(", "'pytz.tzfile.'", ")", ":", "fullname", "=", "'datetime.tzinfo'", "if", "'-'", "in", "fullname", "or", "'/'", "in", "fullname", ":", "# Not a valid Python name; there are many places that", "# generate these, so we just substitute Any rather", "# than crashing.", "fullname", "=", "'Any'", "tokens", ".", "append", "(", "DottedName", "(", "fullname", ")", ")", "s", "=", "s", "[", "len", "(", "m", ".", "group", "(", "0", ")", ")", ":", "]" ]
Translate a type comment into a list of tokens.
[ "Translate", "a", "type", "comment", "into", "a", "list", "of", "tokens", "." ]
python
train
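A hedged usage sketch, assuming the module path matches the file above; the returned list is made up of the `Separator`/`DottedName` tokens visible in the implementation, terminated by `End()`:

```python
from pyannotate_tools.annotations.parse import tokenize  # assumed import path

tokens = tokenize('(int, List[str]) -> Dict[str, Any]')
# Conceptually: Separator('('), DottedName('int'), Separator(','), ...,
# Separator('->'), DottedName('Dict'), ..., End()
print(tokens)
```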
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L3600-L3627
def get_value(self, *args, **kwargs): """ This method should be overriden by any subclass of Parameter, and should be decorated with the @update_if_client decorator. Please see the individual classes documentation: * :meth:`FloatParameter.get_value` * :meth:`ArrayParameter.get_value` * :meth:`HierarchyParameter.get_value` * :meth:`IntParameter.get_value` * :meth:`BoolParameter.get_value` * :meth:`ChoiceParameter.get_value` * :meth:`ConstraintParameter.get_value` * :meth:`HistoryParameter.get_value` If subclassing, this method needs to: * cast to the correct type/units, handling defaults :raises NotImplementedError: because this must be subclassed """ if self.qualifier in kwargs.keys(): # then we have an "override" value that was passed, and we should # just return that. # Example teff_param.get_value('teff', teff=6000) returns 6000 return kwargs.get(self.qualifier) return None
[ "def", "get_value", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "qualifier", "in", "kwargs", ".", "keys", "(", ")", ":", "# then we have an \"override\" value that was passed, and we should", "# just return that.", "# Example teff_param.get_value('teff', teff=6000) returns 6000", "return", "kwargs", ".", "get", "(", "self", ".", "qualifier", ")", "return", "None" ]
This method should be overridden by any subclass of Parameter, and should be decorated with the @update_if_client decorator. Please see the individual classes' documentation: * :meth:`FloatParameter.get_value` * :meth:`ArrayParameter.get_value` * :meth:`HierarchyParameter.get_value` * :meth:`IntParameter.get_value` * :meth:`BoolParameter.get_value` * :meth:`ChoiceParameter.get_value` * :meth:`ConstraintParameter.get_value` * :meth:`HistoryParameter.get_value` If subclassing, this method needs to: * cast to the correct type/units, handling defaults :raises NotImplementedError: because this must be subclassed
[ "This", "method", "should", "be", "overriden", "by", "any", "subclass", "of", "Parameter", "and", "should", "be", "decorated", "with", "the", "@update_if_client", "decorator", ".", "Please", "see", "the", "individual", "classes", "documentation", ":" ]
python
train
mozilla/amo-validator
validator/errorbundler.py
https://github.com/mozilla/amo-validator/blob/0251bfbd7d93106e01ecdb6de5fcd1dc1a180664/validator/errorbundler.py#L257-L261
def failed(self, fail_on_warnings=True): """Returns a boolean value describing whether the validation succeeded or not.""" return bool(self.errors) or (fail_on_warnings and bool(self.warnings))
[ "def", "failed", "(", "self", ",", "fail_on_warnings", "=", "True", ")", ":", "return", "bool", "(", "self", ".", "errors", ")", "or", "(", "fail_on_warnings", "and", "bool", "(", "self", ".", "warnings", ")", ")" ]
Returns a boolean value describing whether the validation succeeded or not.
[ "Returns", "a", "boolean", "value", "describing", "whether", "the", "validation", "succeeded", "or", "not", "." ]
python
train
oscarlazoarjona/fast
fast/misc.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/misc.py#L322-L335
def detuning_combinations(lists): r"""This function recieves a list of length Nl with the number of transitions each laser induces. It returns the cartesian product of all these posibilities as a list of all possible combinations. """ Nl = len(lists) comb = [[i] for i in range(lists[0])] for l in range(1, Nl): combn = [] for c0 in comb: for cl in range(lists[l]): combn += [c0[:]+[cl]] comb = combn[:] return comb
[ "def", "detuning_combinations", "(", "lists", ")", ":", "Nl", "=", "len", "(", "lists", ")", "comb", "=", "[", "[", "i", "]", "for", "i", "in", "range", "(", "lists", "[", "0", "]", ")", "]", "for", "l", "in", "range", "(", "1", ",", "Nl", ")", ":", "combn", "=", "[", "]", "for", "c0", "in", "comb", ":", "for", "cl", "in", "range", "(", "lists", "[", "l", "]", ")", ":", "combn", "+=", "[", "c0", "[", ":", "]", "+", "[", "cl", "]", "]", "comb", "=", "combn", "[", ":", "]", "return", "comb" ]
r"""This function receives a list of length Nl with the number of transitions each laser induces. It returns the Cartesian product of all these possibilities as a list of all possible combinations.
[ "r", "This", "function", "recieves", "a", "list", "of", "length", "Nl", "with", "the", "number", "of", "transitions", "each", "laser", "induces", ".", "It", "returns", "the", "cartesian", "product", "of", "all", "these", "posibilities", "as", "a", "list", "of", "all", "possible", "combinations", "." ]
python
train
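The helper is an explicit Cartesian product over index ranges, so the same result can be obtained with `itertools.product`, which makes a handy cross-check:

```python
from itertools import product

lists = [2, 3]  # e.g. two lasers inducing 2 and 3 transitions respectively

combs = [list(c) for c in product(*(range(n) for n in lists))]
print(combs)  # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]
```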
mitsei/dlkit
dlkit/json_/assessment_authoring/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/objects.py#L860-L870
def get_maximum_score_metadata(self): """Gets the metadata for the maximum score. return: (osid.Metadata) - metadata for the maximum score *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['maximum_score']) metadata.update({'existing_cardinal_values': self._my_map['maximumScore']}) return Metadata(**metadata)
[ "def", "get_maximum_score_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'maximum_score'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_cardinal_values'", ":", "self", ".", "_my_map", "[", "'maximumScore'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Gets the metadata for the maximum score. return: (osid.Metadata) - metadata for the maximum score *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "the", "maximum", "score", "." ]
python
train
JNPRAutomate/pyJunosManager
pyJunosManager/pyJunosManager.py
https://github.com/JNPRAutomate/pyJunosManager/blob/cfbe87bb55488f44bad0b383771a88be7b2ccf2a/pyJunosManager/pyJunosManager.py#L95-L131
def open_config(self,type="shared"): """ Opens the configuration of the currently connected device Args: :type: The type of configuration you want to open. Any string can be provided, however the standard supported options are: **exclusive**, **private**, and **shared**. The default mode is **shared**. Examples: .. code-block:: python #Open shared config from pyJunosManager import JunosDevice dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper") dev.open() dev.open_config() dev.close_config() dev.close() #Open private config from pyJunosManager import JunosDevice dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper") dev.open() dev.open_config("private") dev.close_config() dev.close() """ try: #attempt to open a configuration output = self.dev.rpc("<open-configuration><{0}/></open-configuration>".format(type)) except Exception as err: #output an error if the configuration is not availble print err
[ "def", "open_config", "(", "self", ",", "type", "=", "\"shared\"", ")", ":", "try", ":", "#attempt to open a configuration", "output", "=", "self", ".", "dev", ".", "rpc", "(", "\"<open-configuration><{0}/></open-configuration>\"", ".", "format", "(", "type", ")", ")", "except", "Exception", "as", "err", ":", "#output an error if the configuration is not availble", "print", "err" ]
Opens the configuration of the currently connected device Args: :type: The type of configuration you want to open. Any string can be provided, however the standard supported options are: **exclusive**, **private**, and **shared**. The default mode is **shared**. Examples: .. code-block:: python #Open shared config from pyJunosManager import JunosDevice dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper") dev.open() dev.open_config() dev.close_config() dev.close() #Open private config from pyJunosManager import JunosDevice dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper") dev.open() dev.open_config("private") dev.close_config() dev.close()
[ "Opens", "the", "configuration", "of", "the", "currently", "connected", "device" ]
python
train
facelessuser/backrefs
backrefs/uniprops/__init__.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/uniprops/__init__.py#L341-L352
def get_script_property(value, is_bytes=False): """Get `SC` property.""" obj = unidata.ascii_scripts if is_bytes else unidata.unicode_scripts if value.startswith('^'): negated = value[1:] value = '^' + unidata.unicode_alias['script'].get(negated, negated) else: value = unidata.unicode_alias['script'].get(value, value) return obj[value]
[ "def", "get_script_property", "(", "value", ",", "is_bytes", "=", "False", ")", ":", "obj", "=", "unidata", ".", "ascii_scripts", "if", "is_bytes", "else", "unidata", ".", "unicode_scripts", "if", "value", ".", "startswith", "(", "'^'", ")", ":", "negated", "=", "value", "[", "1", ":", "]", "value", "=", "'^'", "+", "unidata", ".", "unicode_alias", "[", "'script'", "]", ".", "get", "(", "negated", ",", "negated", ")", "else", ":", "value", "=", "unidata", ".", "unicode_alias", "[", "'script'", "]", ".", "get", "(", "value", ",", "value", ")", "return", "obj", "[", "value", "]" ]
Get `SC` property.
[ "Get", "SC", "property", "." ]
python
train
pycontribs/pyrax
pyrax/cloudmonitoring.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudmonitoring.py#L55-L65
def assure_entity(fnc): """ Converts an entityID passed as the entity to a CloudMonitorEntity object. """ @wraps(fnc) def _wrapped(self, entity, *args, **kwargs): if not isinstance(entity, CloudMonitorEntity): # Must be the ID entity = self._entity_manager.get(entity) return fnc(self, entity, *args, **kwargs) return _wrapped
[ "def", "assure_entity", "(", "fnc", ")", ":", "@", "wraps", "(", "fnc", ")", "def", "_wrapped", "(", "self", ",", "entity", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "entity", ",", "CloudMonitorEntity", ")", ":", "# Must be the ID", "entity", "=", "self", ".", "_entity_manager", ".", "get", "(", "entity", ")", "return", "fnc", "(", "self", ",", "entity", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapped" ]
Converts an entityID passed as the entity to a CloudMonitorEntity object.
[ "Converts", "an", "entityID", "passed", "as", "the", "entity", "to", "a", "CloudMonitorEntity", "object", "." ]
python
train
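The decorator implements a common "accept an object or its ID" coercion pattern. A self-contained sketch of the same pattern, with a stand-in registry replacing the `self._entity_manager.get()` lookup:

```python
from functools import wraps

class Entity:
    def __init__(self, entity_id):
        self.id = entity_id

_REGISTRY = {'ent-42': Entity('ent-42')}  # stand-in for the manager lookup

def assure_entity(fnc):
    @wraps(fnc)
    def _wrapped(self, entity, *args, **kwargs):
        if not isinstance(entity, Entity):   # caller passed an ID
            entity = _REGISTRY[entity]       # resolve it to an object first
        return fnc(self, entity, *args, **kwargs)
    return _wrapped

class Monitor:
    @assure_entity
    def describe(self, entity):
        return entity.id

m = Monitor()
print(m.describe('ent-42'))          # an ID gets resolved to an Entity
print(m.describe(Entity('ent-7')))   # objects pass straight through
```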
swimlane/swimlane-python
swimlane/core/adapters/helper.py
https://github.com/swimlane/swimlane-python/blob/588fc503a76799bcdb5aecdf2f64a6ee05e3922d/swimlane/core/adapters/helper.py#L33-L57
def add_comment(self, app_id, record_id, field_id, message): """Directly add a comment to a record without retrieving the app or record first Warnings: Does not perform any app, record, or field ID validation Args: app_id (str): Full App ID string record_id (str): Full parent Record ID string field_id (str): Full field ID to target reference field on parent Record string message (str): New comment message body """ self._swimlane.request( 'post', 'app/{0}/record/{1}/{2}/comment'.format( app_id, record_id, field_id ), json={ 'message': message, 'createdDate': pendulum.now().to_rfc3339_string() } )
[ "def", "add_comment", "(", "self", ",", "app_id", ",", "record_id", ",", "field_id", ",", "message", ")", ":", "self", ".", "_swimlane", ".", "request", "(", "'post'", ",", "'app/{0}/record/{1}/{2}/comment'", ".", "format", "(", "app_id", ",", "record_id", ",", "field_id", ")", ",", "json", "=", "{", "'message'", ":", "message", ",", "'createdDate'", ":", "pendulum", ".", "now", "(", ")", ".", "to_rfc3339_string", "(", ")", "}", ")" ]
Directly add a comment to a record without retrieving the app or record first Warnings: Does not perform any app, record, or field ID validation Args: app_id (str): Full App ID string record_id (str): Full parent Record ID string field_id (str): Full field ID to target reference field on parent Record string message (str): New comment message body
[ "Directly", "add", "a", "comment", "to", "a", "record", "without", "retrieving", "the", "app", "or", "record", "first" ]
python
train
h2non/paco
paco/every.py
https://github.com/h2non/paco/blob/1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d/paco/every.py#L11-L84
def every(coro, iterable, limit=1, loop=None): """ Returns `True` if every element in a given iterable satisfies the coroutine asynchronous test. If any iteratee coroutine call returns `False`, the process is inmediately stopped, and `False` will be returned. You can increase the concurrency limit for a fast race condition scenario. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutine function): coroutine function to call with values to reduce. iterable (iterable): an iterable collection yielding coroutines functions. limit (int): max concurrency execution limit. Use ``0`` for no limit. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if input arguments are not valid. Returns: bool: `True` if all the values passes the test, otherwise `False`. Usage:: async def gt_10(num): return num > 10 await paco.every(gt_10, [1, 2, 3, 11]) # => False await paco.every(gt_10, [11, 12, 13]) # => True """ assert_corofunction(coro=coro) assert_iter(iterable=iterable) # Reduced accumulator value passes = True # Handle empty iterables if len(iterable) == 0: return passes # Create concurrent executor pool = ConcurrentExecutor(limit=limit, loop=loop) # Tester function to guarantee the file is canceled. @asyncio.coroutine def tester(element): nonlocal passes if not passes: return None if not (yield from coro(element)): # Flag as not test passed passes = False # Force ignoring pending coroutines pool.cancel() # Iterate and attach coroutine for defer scheduling for element in iterable: pool.add(partial(tester, element)) # Wait until all coroutines finish yield from pool.run() return passes
[ "def", "every", "(", "coro", ",", "iterable", ",", "limit", "=", "1", ",", "loop", "=", "None", ")", ":", "assert_corofunction", "(", "coro", "=", "coro", ")", "assert_iter", "(", "iterable", "=", "iterable", ")", "# Reduced accumulator value", "passes", "=", "True", "# Handle empty iterables", "if", "len", "(", "iterable", ")", "==", "0", ":", "return", "passes", "# Create concurrent executor", "pool", "=", "ConcurrentExecutor", "(", "limit", "=", "limit", ",", "loop", "=", "loop", ")", "# Tester function to guarantee the file is canceled.", "@", "asyncio", ".", "coroutine", "def", "tester", "(", "element", ")", ":", "nonlocal", "passes", "if", "not", "passes", ":", "return", "None", "if", "not", "(", "yield", "from", "coro", "(", "element", ")", ")", ":", "# Flag as not test passed", "passes", "=", "False", "# Force ignoring pending coroutines", "pool", ".", "cancel", "(", ")", "# Iterate and attach coroutine for defer scheduling", "for", "element", "in", "iterable", ":", "pool", ".", "add", "(", "partial", "(", "tester", ",", "element", ")", ")", "# Wait until all coroutines finish", "yield", "from", "pool", ".", "run", "(", ")", "return", "passes" ]
Returns `True` if every element in a given iterable satisfies the coroutine asynchronous test. If any iteratee coroutine call returns `False`, the process is inmediately stopped, and `False` will be returned. You can increase the concurrency limit for a fast race condition scenario. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutine function): coroutine function to call with values to reduce. iterable (iterable): an iterable collection yielding coroutines functions. limit (int): max concurrency execution limit. Use ``0`` for no limit. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if input arguments are not valid. Returns: bool: `True` if all the values passes the test, otherwise `False`. Usage:: async def gt_10(num): return num > 10 await paco.every(gt_10, [1, 2, 3, 11]) # => False await paco.every(gt_10, [11, 12, 13]) # => True
[ "Returns", "True", "if", "every", "element", "in", "a", "given", "iterable", "satisfies", "the", "coroutine", "asynchronous", "test", "." ]
python
train
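A hedged, end-to-end sketch of the short-circuit behaviour described above, assuming `paco` is installed and running on a Python/asyncio version the library still supports:

```python
import asyncio
import paco

async def gt_10(num):
    return num > 10

async def main():
    print(await paco.every(gt_10, [11, 12, 13], limit=3))  # True
    print(await paco.every(gt_10, [1, 2, 3, 11]))          # False (stops on the first failure)

asyncio.run(main())
```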
iotile/coretools
iotilecore/iotile/core/hw/virtual/base_runnable.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/virtual/base_runnable.py#L47-L56
def start_workers(self): """Start running this virtual device including any necessary worker threads.""" if self._started: raise InternalError("The method start() was called twice on a BaseRunnable object.") self._started = True for worker in self._workers: worker.start()
[ "def", "start_workers", "(", "self", ")", ":", "if", "self", ".", "_started", ":", "raise", "InternalError", "(", "\"The method start() was called twice on a BaseRunnable object.\"", ")", "self", ".", "_started", "=", "True", "for", "worker", "in", "self", ".", "_workers", ":", "worker", ".", "start", "(", ")" ]
Start running this virtual device including any necessary worker threads.
[ "Start", "running", "this", "virtual", "device", "including", "any", "necessary", "worker", "threads", "." ]
python
train
ornlneutronimaging/ImagingReso
ImagingReso/resonance.py
https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/resonance.py#L785-L978
def export(self, output_type='df', filename=None, x_axis='energy', y_axis='attenuation', mixed=True, all_layers=False, all_elements=False, all_isotopes=False, items_to_export=None, offset_us=0., source_to_detector_m=16., t_start_us=1, time_resolution_us=0.16, time_unit='us'): """ output x and y values to clipboard or .csv file output the transmission or attenuation or sigma of compound, element and/or isotopes specified 'sigma_b' exported for each isotope is the product resulted from (sigma * isotopic ratio) 'atoms_per_cm3' of each element is also exported in 'sigma' mode based on molar mass within stack. :param output_type: export type : ['df', 'csv', 'clip'] :type output_type: str :param mixed: True -> display the total of each layer False -> not displayed :type mixed: boolean :param filename: string. filename (with .csv suffix) you would like to save as None -> export to clipboard :type filename: string :param x_axis: string. x type for export. Must in ['energy', 'lambda', 'time', 'number'] :param y_axis: string. y type for export. Must in ['transmission', 'attenuation', 'sigma', 'sigma_raw', 'miu_per_cm'] :param all_layers: boolean. True -> export all layers False -> not export :param all_elements: boolean. True -> export all elements signal False -> not export :param all_isotopes: boolean. True -> export all isotopes signal False -> not export :param items_to_export: array that describes what to export ex: [['CoAg','Ag','107-Ag'], ['CoAg']] if the dictionary is empty, everything is exported :param time_unit: string. Must be either 's' or 'us' or 'ns' Note: this will be used only when x_axis='time' :param offset_us: default: 0 Note: only used when x_axis='number' or 'time' :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time' :param time_resolution_us: Note: this will be used only when x_axis='number' :param t_start_us: when is the first acquisition occurred. 
default: 1 Note: this will be used only when x_axis='number' :return: simulated resonance signals or sigma in the form of 'clipboard' or '.csv file' or 'pd.DataFrame' """ if x_axis not in x_type_list: raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list)) if time_unit not in time_unit_list: raise ValueError("Please specify the time unit using one from '{}'.".format(time_unit_list)) if y_axis not in y_type_list: raise ValueError("Please specify the y-axis type using one from '{}'.".format(y_type_list)) if output_type not in export_type_list: raise ValueError("Please specify export type using one from '{}'.".format(export_type_list)) # stack from self _stack_signal = self.stack_signal _stack = self.stack _x_axis = self.total_signal['energy_eV'] x_axis_label = None df = pd.DataFrame() """X-axis""" # determine values and labels for x-axis with options from # 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)' if x_axis == 'energy': x_axis_label = 'Energy (eV)' if x_axis == 'lambda': x_axis_label = u"Wavelength (\u212B)" _x_axis = _utilities.ev_to_angstroms(array=_x_axis) if x_axis == 'time': if time_unit == 's': x_axis_label = 'Time (s)' _x_axis = _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) if time_unit == 'us': x_axis_label = 'Time (us)' _x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) if time_unit == 'ns': x_axis_label = 'Time (ns)' _x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}" .format(x_axis_label, source_to_detector_m, offset_us)) if x_axis == 'number': x_axis_label = 'Image number (#)' _x_axis = _utilities.ev_to_image_number(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us, time_resolution_us=time_resolution_us, t_start_us=t_start_us) print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}" .format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us)) if x_axis_label is None: raise ValueError("x_axis_label does NOT exist, please check.") df[x_axis_label] = _x_axis """Y-axis""" if y_axis[:5] != 'sigma': # export transmission or attenuation or miu_per_cm y_axis_tag = y_axis if y_axis_tag == 'miu_per_cm': mixed = False print("'y_axis='miu_per_cm'' is selected. 
Auto force 'mixed=False'") if mixed: _y_axis = self.total_signal[y_axis_tag] df['Total_' + y_axis_tag] = _y_axis if items_to_export is None: # export based on specified level : layer|element|isotope if all_layers: for _compound in _stack.keys(): _y_axis = _stack_signal[_compound][y_axis_tag] df[_compound] = _y_axis if all_elements: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: _y_axis = _stack_signal[_compound][_element][y_axis_tag] df[_compound + '/' + _element] = _y_axis if all_isotopes: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: for _isotope in _stack[_compound][_element]['isotopes']['list']: _y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag] df[_compound + '/' + _element + '/' + _isotope] = _y_axis else: # export specified transmission or attenuation for _path_to_export in items_to_export: _path_to_export = list(_path_to_export) _live_path = _stack_signal _label = "/".join(_path_to_export) while _path_to_export: _item = _path_to_export.pop(0) _live_path = _live_path[_item] _y_axis = _live_path[y_axis_tag] df[_label] = _y_axis else: # export sigma if y_axis == 'sigma': y_axis_tag = 'sigma_b' else: y_axis_tag = 'sigma_b_raw' # y_axis_tag = 'sigma_b_raw' _stack_sigma = self.stack_sigma if items_to_export is None: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: _y_axis = _stack_sigma[_compound][_element]['sigma_b'] # No 'sigma_b_raw' at this level df[_compound + '/' + _element + '/atoms_per_cm3'] = _stack[_compound][_element]['atoms_per_cm3'] df[_compound + '/' + _element] = _y_axis if all_isotopes: for _isotope in _stack[_compound][_element]['isotopes']['list']: _y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag] df[_compound + '/' + _element + '/' + _isotope] = _y_axis else: # export specified sigma for _path_to_export in items_to_export: if y_axis_tag[-3:] == 'raw': if len(_path_to_export) < 3: raise ValueError( "Getting raw sigma of '{}' at layer or element level is not supported. " "If it is a single element layer, please follow " "['layer', 'element', 'isotope'] format.".format(_path_to_export[0])) else: if len(_path_to_export) < 2: raise ValueError( "Getting weighted sigma of '{}' at layer level is not supported. " "If it is a single element layer, please follow " "['layer', 'element'] format.".format(_path_to_export[0])) _path_to_export = list(_path_to_export) _live_path = _stack_sigma _label = "/".join(_path_to_export) while _path_to_export: _item = _path_to_export.pop(0) _live_path = _live_path[_item] _y_axis = _live_path[y_axis_tag] df[_label] = _y_axis if len(df.columns) <= 1: raise ValueError("No y values have been selected to export!") if output_type == 'csv': if filename is None: filename = 'data.csv' if '.csv' not in filename: filename += '.csv' df.to_csv(filename, index=False) print("Exporting to file ('./{}') completed.".format(filename)) elif output_type == 'clip': df.to_clipboard(excel=True, index=False) print('Exporting to clipboard completed.') else: # output_type == 'df' return df
[ "def", "export", "(", "self", ",", "output_type", "=", "'df'", ",", "filename", "=", "None", ",", "x_axis", "=", "'energy'", ",", "y_axis", "=", "'attenuation'", ",", "mixed", "=", "True", ",", "all_layers", "=", "False", ",", "all_elements", "=", "False", ",", "all_isotopes", "=", "False", ",", "items_to_export", "=", "None", ",", "offset_us", "=", "0.", ",", "source_to_detector_m", "=", "16.", ",", "t_start_us", "=", "1", ",", "time_resolution_us", "=", "0.16", ",", "time_unit", "=", "'us'", ")", ":", "if", "x_axis", "not", "in", "x_type_list", ":", "raise", "ValueError", "(", "\"Please specify the x-axis type using one from '{}'.\"", ".", "format", "(", "x_type_list", ")", ")", "if", "time_unit", "not", "in", "time_unit_list", ":", "raise", "ValueError", "(", "\"Please specify the time unit using one from '{}'.\"", ".", "format", "(", "time_unit_list", ")", ")", "if", "y_axis", "not", "in", "y_type_list", ":", "raise", "ValueError", "(", "\"Please specify the y-axis type using one from '{}'.\"", ".", "format", "(", "y_type_list", ")", ")", "if", "output_type", "not", "in", "export_type_list", ":", "raise", "ValueError", "(", "\"Please specify export type using one from '{}'.\"", ".", "format", "(", "export_type_list", ")", ")", "# stack from self", "_stack_signal", "=", "self", ".", "stack_signal", "_stack", "=", "self", ".", "stack", "_x_axis", "=", "self", ".", "total_signal", "[", "'energy_eV'", "]", "x_axis_label", "=", "None", "df", "=", "pd", ".", "DataFrame", "(", ")", "\"\"\"X-axis\"\"\"", "# determine values and labels for x-axis with options from", "# 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)'", "if", "x_axis", "==", "'energy'", ":", "x_axis_label", "=", "'Energy (eV)'", "if", "x_axis", "==", "'lambda'", ":", "x_axis_label", "=", "u\"Wavelength (\\u212B)\"", "_x_axis", "=", "_utilities", ".", "ev_to_angstroms", "(", "array", "=", "_x_axis", ")", "if", "x_axis", "==", "'time'", ":", "if", "time_unit", "==", "'s'", ":", "x_axis_label", "=", "'Time (s)'", "_x_axis", "=", "_utilities", ".", "ev_to_s", "(", "array", "=", "_x_axis", ",", "source_to_detector_m", "=", "source_to_detector_m", ",", "offset_us", "=", "offset_us", ")", "if", "time_unit", "==", "'us'", ":", "x_axis_label", "=", "'Time (us)'", "_x_axis", "=", "1e6", "*", "_utilities", ".", "ev_to_s", "(", "array", "=", "_x_axis", ",", "source_to_detector_m", "=", "source_to_detector_m", ",", "offset_us", "=", "offset_us", ")", "if", "time_unit", "==", "'ns'", ":", "x_axis_label", "=", "'Time (ns)'", "_x_axis", "=", "1e9", "*", "_utilities", ".", "ev_to_s", "(", "array", "=", "_x_axis", ",", "source_to_detector_m", "=", "source_to_detector_m", ",", "offset_us", "=", "offset_us", ")", "print", "(", "\"'{}' was obtained with the following:\\nsource_to_detector_m={}\\noffset_us={}\"", ".", "format", "(", "x_axis_label", ",", "source_to_detector_m", ",", "offset_us", ")", ")", "if", "x_axis", "==", "'number'", ":", "x_axis_label", "=", "'Image number (#)'", "_x_axis", "=", "_utilities", ".", "ev_to_image_number", "(", "array", "=", "_x_axis", ",", "source_to_detector_m", "=", "source_to_detector_m", ",", "offset_us", "=", "offset_us", ",", "time_resolution_us", "=", "time_resolution_us", ",", "t_start_us", "=", "t_start_us", ")", "print", "(", "\"'{}' was obtained with the following:\\nsource_to_detector_m={}\\noffset_us={}\\ntime_resolution_us={}\"", ".", "format", "(", "x_axis_label", ",", "source_to_detector_m", ",", "offset_us", ",", "time_resolution_us", ")", ")", "if", "x_axis_label", "is", "None", ":", 
"raise", "ValueError", "(", "\"x_axis_label does NOT exist, please check.\"", ")", "df", "[", "x_axis_label", "]", "=", "_x_axis", "\"\"\"Y-axis\"\"\"", "if", "y_axis", "[", ":", "5", "]", "!=", "'sigma'", ":", "# export transmission or attenuation or miu_per_cm", "y_axis_tag", "=", "y_axis", "if", "y_axis_tag", "==", "'miu_per_cm'", ":", "mixed", "=", "False", "print", "(", "\"'y_axis='miu_per_cm'' is selected. Auto force 'mixed=False'\"", ")", "if", "mixed", ":", "_y_axis", "=", "self", ".", "total_signal", "[", "y_axis_tag", "]", "df", "[", "'Total_'", "+", "y_axis_tag", "]", "=", "_y_axis", "if", "items_to_export", "is", "None", ":", "# export based on specified level : layer|element|isotope", "if", "all_layers", ":", "for", "_compound", "in", "_stack", ".", "keys", "(", ")", ":", "_y_axis", "=", "_stack_signal", "[", "_compound", "]", "[", "y_axis_tag", "]", "df", "[", "_compound", "]", "=", "_y_axis", "if", "all_elements", ":", "for", "_compound", "in", "_stack", ".", "keys", "(", ")", ":", "for", "_element", "in", "_stack", "[", "_compound", "]", "[", "'elements'", "]", ":", "_y_axis", "=", "_stack_signal", "[", "_compound", "]", "[", "_element", "]", "[", "y_axis_tag", "]", "df", "[", "_compound", "+", "'/'", "+", "_element", "]", "=", "_y_axis", "if", "all_isotopes", ":", "for", "_compound", "in", "_stack", ".", "keys", "(", ")", ":", "for", "_element", "in", "_stack", "[", "_compound", "]", "[", "'elements'", "]", ":", "for", "_isotope", "in", "_stack", "[", "_compound", "]", "[", "_element", "]", "[", "'isotopes'", "]", "[", "'list'", "]", ":", "_y_axis", "=", "_stack_signal", "[", "_compound", "]", "[", "_element", "]", "[", "_isotope", "]", "[", "y_axis_tag", "]", "df", "[", "_compound", "+", "'/'", "+", "_element", "+", "'/'", "+", "_isotope", "]", "=", "_y_axis", "else", ":", "# export specified transmission or attenuation", "for", "_path_to_export", "in", "items_to_export", ":", "_path_to_export", "=", "list", "(", "_path_to_export", ")", "_live_path", "=", "_stack_signal", "_label", "=", "\"/\"", ".", "join", "(", "_path_to_export", ")", "while", "_path_to_export", ":", "_item", "=", "_path_to_export", ".", "pop", "(", "0", ")", "_live_path", "=", "_live_path", "[", "_item", "]", "_y_axis", "=", "_live_path", "[", "y_axis_tag", "]", "df", "[", "_label", "]", "=", "_y_axis", "else", ":", "# export sigma", "if", "y_axis", "==", "'sigma'", ":", "y_axis_tag", "=", "'sigma_b'", "else", ":", "y_axis_tag", "=", "'sigma_b_raw'", "# y_axis_tag = 'sigma_b_raw'", "_stack_sigma", "=", "self", ".", "stack_sigma", "if", "items_to_export", "is", "None", ":", "for", "_compound", "in", "_stack", ".", "keys", "(", ")", ":", "for", "_element", "in", "_stack", "[", "_compound", "]", "[", "'elements'", "]", ":", "_y_axis", "=", "_stack_sigma", "[", "_compound", "]", "[", "_element", "]", "[", "'sigma_b'", "]", "# No 'sigma_b_raw' at this level", "df", "[", "_compound", "+", "'/'", "+", "_element", "+", "'/atoms_per_cm3'", "]", "=", "_stack", "[", "_compound", "]", "[", "_element", "]", "[", "'atoms_per_cm3'", "]", "df", "[", "_compound", "+", "'/'", "+", "_element", "]", "=", "_y_axis", "if", "all_isotopes", ":", "for", "_isotope", "in", "_stack", "[", "_compound", "]", "[", "_element", "]", "[", "'isotopes'", "]", "[", "'list'", "]", ":", "_y_axis", "=", "_stack_sigma", "[", "_compound", "]", "[", "_element", "]", "[", "_isotope", "]", "[", "y_axis_tag", "]", "df", "[", "_compound", "+", "'/'", "+", "_element", "+", "'/'", "+", "_isotope", "]", "=", "_y_axis", "else", ":", "# export specified 
sigma", "for", "_path_to_export", "in", "items_to_export", ":", "if", "y_axis_tag", "[", "-", "3", ":", "]", "==", "'raw'", ":", "if", "len", "(", "_path_to_export", ")", "<", "3", ":", "raise", "ValueError", "(", "\"Getting raw sigma of '{}' at layer or element level is not supported. \"", "\"If it is a single element layer, please follow \"", "\"['layer', 'element', 'isotope'] format.\"", ".", "format", "(", "_path_to_export", "[", "0", "]", ")", ")", "else", ":", "if", "len", "(", "_path_to_export", ")", "<", "2", ":", "raise", "ValueError", "(", "\"Getting weighted sigma of '{}' at layer level is not supported. \"", "\"If it is a single element layer, please follow \"", "\"['layer', 'element'] format.\"", ".", "format", "(", "_path_to_export", "[", "0", "]", ")", ")", "_path_to_export", "=", "list", "(", "_path_to_export", ")", "_live_path", "=", "_stack_sigma", "_label", "=", "\"/\"", ".", "join", "(", "_path_to_export", ")", "while", "_path_to_export", ":", "_item", "=", "_path_to_export", ".", "pop", "(", "0", ")", "_live_path", "=", "_live_path", "[", "_item", "]", "_y_axis", "=", "_live_path", "[", "y_axis_tag", "]", "df", "[", "_label", "]", "=", "_y_axis", "if", "len", "(", "df", ".", "columns", ")", "<=", "1", ":", "raise", "ValueError", "(", "\"No y values have been selected to export!\"", ")", "if", "output_type", "==", "'csv'", ":", "if", "filename", "is", "None", ":", "filename", "=", "'data.csv'", "if", "'.csv'", "not", "in", "filename", ":", "filename", "+=", "'.csv'", "df", ".", "to_csv", "(", "filename", ",", "index", "=", "False", ")", "print", "(", "\"Exporting to file ('./{}') completed.\"", ".", "format", "(", "filename", ")", ")", "elif", "output_type", "==", "'clip'", ":", "df", ".", "to_clipboard", "(", "excel", "=", "True", ",", "index", "=", "False", ")", "print", "(", "'Exporting to clipboard completed.'", ")", "else", ":", "# output_type == 'df'", "return", "df" ]
output x and y values to clipboard or .csv file output the transmission or attenuation or sigma of compound, element and/or isotopes specified 'sigma_b' exported for each isotope is the product resulted from (sigma * isotopic ratio) 'atoms_per_cm3' of each element is also exported in 'sigma' mode based on molar mass within stack. :param output_type: export type : ['df', 'csv', 'clip'] :type output_type: str :param mixed: True -> display the total of each layer False -> not displayed :type mixed: boolean :param filename: string. filename (with .csv suffix) you would like to save as None -> export to clipboard :type filename: string :param x_axis: string. x type for export. Must in ['energy', 'lambda', 'time', 'number'] :param y_axis: string. y type for export. Must in ['transmission', 'attenuation', 'sigma', 'sigma_raw', 'miu_per_cm'] :param all_layers: boolean. True -> export all layers False -> not export :param all_elements: boolean. True -> export all elements signal False -> not export :param all_isotopes: boolean. True -> export all isotopes signal False -> not export :param items_to_export: array that describes what to export ex: [['CoAg','Ag','107-Ag'], ['CoAg']] if the dictionary is empty, everything is exported :param time_unit: string. Must be either 's' or 'us' or 'ns' Note: this will be used only when x_axis='time' :param offset_us: default: 0 Note: only used when x_axis='number' or 'time' :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time' :param time_resolution_us: Note: this will be used only when x_axis='number' :param t_start_us: when is the first acquisition occurred. default: 1 Note: this will be used only when x_axis='number' :return: simulated resonance signals or sigma in the form of 'clipboard' or '.csv file' or 'pd.DataFrame'
[ "output", "x", "and", "y", "values", "to", "clipboard", "or", ".", "csv", "file", "output", "the", "transmission", "or", "attenuation", "or", "sigma", "of", "compound", "element", "and", "/", "or", "isotopes", "specified", "sigma_b", "exported", "for", "each", "isotope", "is", "the", "product", "resulted", "from", "(", "sigma", "*", "isotopic", "ratio", ")", "atoms_per_cm3", "of", "each", "element", "is", "also", "exported", "in", "sigma", "mode", "based", "on", "molar", "mass", "within", "stack", "." ]
python
train
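A hedged usage sketch of the export options; `reso` is assumed to be an already-initialised object from `ImagingReso.resonance` with its layer stack defined (that setup is not part of this record):

```python
# DataFrame of total and per-element attenuation versus wavelength
df = reso.export(output_type='df', x_axis='lambda',
                 y_axis='attenuation', mixed=True, all_elements=True)

# CSV of the same signals on a time axis, using the stated defaults for geometry
reso.export(output_type='csv', filename='signals.csv',
            x_axis='time', time_unit='us',
            source_to_detector_m=16., offset_us=0.)
```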
ui/django-post_office
post_office/mail.py
https://github.com/ui/django-post_office/blob/03e1ffb69829b475402f0f3ecd9f8a90af7da4bd/post_office/mail.py#L23-L84
def create(sender, recipients=None, cc=None, bcc=None, subject='', message='', html_message='', context=None, scheduled_time=None, headers=None, template=None, priority=None, render_on_delivery=False, commit=True, backend=''): """ Creates an email from supplied keyword arguments. If template is specified, email subject and content will be rendered during delivery. """ priority = parse_priority(priority) status = None if priority == PRIORITY.now else STATUS.queued if recipients is None: recipients = [] if cc is None: cc = [] if bcc is None: bcc = [] if context is None: context = '' # If email is to be rendered during delivery, save all necessary # information if render_on_delivery: email = Email( from_email=sender, to=recipients, cc=cc, bcc=bcc, scheduled_time=scheduled_time, headers=headers, priority=priority, status=status, context=context, template=template, backend_alias=backend ) else: if template: subject = template.subject message = template.content html_message = template.html_content _context = Context(context or {}) subject = Template(subject).render(_context) message = Template(message).render(_context) html_message = Template(html_message).render(_context) email = Email( from_email=sender, to=recipients, cc=cc, bcc=bcc, subject=subject, message=message, html_message=html_message, scheduled_time=scheduled_time, headers=headers, priority=priority, status=status, backend_alias=backend ) if commit: email.save() return email
[ "def", "create", "(", "sender", ",", "recipients", "=", "None", ",", "cc", "=", "None", ",", "bcc", "=", "None", ",", "subject", "=", "''", ",", "message", "=", "''", ",", "html_message", "=", "''", ",", "context", "=", "None", ",", "scheduled_time", "=", "None", ",", "headers", "=", "None", ",", "template", "=", "None", ",", "priority", "=", "None", ",", "render_on_delivery", "=", "False", ",", "commit", "=", "True", ",", "backend", "=", "''", ")", ":", "priority", "=", "parse_priority", "(", "priority", ")", "status", "=", "None", "if", "priority", "==", "PRIORITY", ".", "now", "else", "STATUS", ".", "queued", "if", "recipients", "is", "None", ":", "recipients", "=", "[", "]", "if", "cc", "is", "None", ":", "cc", "=", "[", "]", "if", "bcc", "is", "None", ":", "bcc", "=", "[", "]", "if", "context", "is", "None", ":", "context", "=", "''", "# If email is to be rendered during delivery, save all necessary", "# information", "if", "render_on_delivery", ":", "email", "=", "Email", "(", "from_email", "=", "sender", ",", "to", "=", "recipients", ",", "cc", "=", "cc", ",", "bcc", "=", "bcc", ",", "scheduled_time", "=", "scheduled_time", ",", "headers", "=", "headers", ",", "priority", "=", "priority", ",", "status", "=", "status", ",", "context", "=", "context", ",", "template", "=", "template", ",", "backend_alias", "=", "backend", ")", "else", ":", "if", "template", ":", "subject", "=", "template", ".", "subject", "message", "=", "template", ".", "content", "html_message", "=", "template", ".", "html_content", "_context", "=", "Context", "(", "context", "or", "{", "}", ")", "subject", "=", "Template", "(", "subject", ")", ".", "render", "(", "_context", ")", "message", "=", "Template", "(", "message", ")", ".", "render", "(", "_context", ")", "html_message", "=", "Template", "(", "html_message", ")", ".", "render", "(", "_context", ")", "email", "=", "Email", "(", "from_email", "=", "sender", ",", "to", "=", "recipients", ",", "cc", "=", "cc", ",", "bcc", "=", "bcc", ",", "subject", "=", "subject", ",", "message", "=", "message", ",", "html_message", "=", "html_message", ",", "scheduled_time", "=", "scheduled_time", ",", "headers", "=", "headers", ",", "priority", "=", "priority", ",", "status", "=", "status", ",", "backend_alias", "=", "backend", ")", "if", "commit", ":", "email", ".", "save", "(", ")", "return", "email" ]
Creates an email from supplied keyword arguments. If template is specified, email subject and content will be rendered during delivery.
[ "Creates", "an", "email", "from", "supplied", "keyword", "arguments", ".", "If", "template", "is", "specified", "email", "subject", "and", "content", "will", "be", "rendered", "during", "delivery", "." ]
python
train
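A hedged sketch of calling this helper directly; it assumes a configured Django project with `post_office` installed, and the parameter names are taken from the signature above:

```python
from post_office.mail import create  # assumed import path from this record

email = create(
    sender='noreply@example.com',
    recipients=['user@example.com'],
    subject='Welcome, {{ name }}',
    message='Hello {{ name }}!',
    context={'name': 'Ada'},
    render_on_delivery=True,  # store template/context and render at send time
)
print(email.status)  # queued, because priority is not PRIORITY.now
```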
dwavesystems/dimod
dimod/higherorder/polynomial.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/higherorder/polynomial.py#L391-L421
def to_hising(self): """Construct a higher-order Ising problem from a binary polynomial. Returns: tuple: A 3-tuple of the form (`h`, `J`, `offset`) where `h` includes the linear biases, `J` has the higher-order biases and `offset` is the linear offset. Examples: >>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1, 'abc': -1}, dimod.SPIN) >>> h, J, off = poly.to_hising() >>> h {'a': -1} """ if self.vartype is Vartype.BINARY: return self.to_spin().to_hising() h = {} J = {} offset = 0 for term, bias in self.items(): if len(term) == 0: offset += bias elif len(term) == 1: v, = term h[v] = bias else: J[tuple(term)] = bias return h, J, offset
[ "def", "to_hising", "(", "self", ")", ":", "if", "self", ".", "vartype", "is", "Vartype", ".", "BINARY", ":", "return", "self", ".", "to_spin", "(", ")", ".", "to_hising", "(", ")", "h", "=", "{", "}", "J", "=", "{", "}", "offset", "=", "0", "for", "term", ",", "bias", "in", "self", ".", "items", "(", ")", ":", "if", "len", "(", "term", ")", "==", "0", ":", "offset", "+=", "bias", "elif", "len", "(", "term", ")", "==", "1", ":", "v", ",", "=", "term", "h", "[", "v", "]", "=", "bias", "else", ":", "J", "[", "tuple", "(", "term", ")", "]", "=", "bias", "return", "h", ",", "J", ",", "offset" ]
Construct a higher-order Ising problem from a binary polynomial. Returns: tuple: A 3-tuple of the form (`h`, `J`, `offset`) where `h` includes the linear biases, `J` has the higher-order biases and `offset` is the linear offset. Examples: >>> poly = dimod.BinaryPolynomial({'a': -1, 'ab': 1, 'abc': -1}, dimod.SPIN) >>> h, J, off = poly.to_hising() >>> h {'a': -1}
[ "Construct", "a", "higher", "-", "order", "Ising", "problem", "from", "a", "binary", "polynomial", "." ]
python
train
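The unpacking rule (constant term to `offset`, single-variable terms to `h`, everything else to `J`) can be reproduced on a plain dict without dimod, which makes the mapping easy to verify:

```python
poly = {(): 0.5, ('a',): -1.0, ('a', 'b'): 1.0, ('a', 'b', 'c'): -1.0}  # spin-valued terms

h, J, offset = {}, {}, 0.0
for term, bias in poly.items():
    if len(term) == 0:
        offset += bias          # constant term
    elif len(term) == 1:
        h[term[0]] = bias       # linear bias
    else:
        J[tuple(term)] = bias   # higher-order bias

print(h, J, offset)  # {'a': -1.0} {('a', 'b'): 1.0, ('a', 'b', 'c'): -1.0} 0.5
```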
GNS3/gns3-server
gns3server/controller/notification.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/notification.py#L122-L132
def _send_event_to_all(self, action, event): """ Send an event to all the client listening for notifications on all projects :param action: Action name :param event: Event to send """ for project_listeners in self._listeners.values(): for listener in project_listeners: listener.put_nowait((action, event, {}))
[ "def", "_send_event_to_all", "(", "self", ",", "action", ",", "event", ")", ":", "for", "project_listeners", "in", "self", ".", "_listeners", ".", "values", "(", ")", ":", "for", "listener", "in", "project_listeners", ":", "listener", ".", "put_nowait", "(", "(", "action", ",", "event", ",", "{", "}", ")", ")" ]
Send an event to all the client listening for notifications on all projects :param action: Action name :param event: Event to send
[ "Send", "an", "event", "to", "all", "the", "client", "listening", "for", "notifications", "on", "all", "projects" ]
python
train
delph-in/pydelphin
delphin/mrs/path.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/path.py#L321-L335
def merge(base, obj, location=None): """ merge is like XmrsPathNode.update() except it raises errors on unequal non-None values. """ # pump object to it's location with dummy nodes while location: axis = location.pop() obj = XmrsPathNode(None, None, links={axis: obj}) if base is None: return obj _merge(base, obj) # if isinstance(base, XmrsPath): # base.calculate_metrics() return base
[ "def", "merge", "(", "base", ",", "obj", ",", "location", "=", "None", ")", ":", "# pump object to it's location with dummy nodes", "while", "location", ":", "axis", "=", "location", ".", "pop", "(", ")", "obj", "=", "XmrsPathNode", "(", "None", ",", "None", ",", "links", "=", "{", "axis", ":", "obj", "}", ")", "if", "base", "is", "None", ":", "return", "obj", "_merge", "(", "base", ",", "obj", ")", "# if isinstance(base, XmrsPath):", "# base.calculate_metrics()", "return", "base" ]
merge is like XmrsPathNode.update() except it raises errors on unequal non-None values.
[ "merge", "is", "like", "XmrsPathNode", ".", "update", "()", "except", "it", "raises", "errors", "on", "unequal", "non", "-", "None", "values", "." ]
python
train
fastai/fastai
fastai/collab.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/collab.py#L55-L68
def from_df(cls, ratings:DataFrame, valid_pct:float=0.2, user_name:Optional[str]=None, item_name:Optional[str]=None, rating_name:Optional[str]=None, test:DataFrame=None, seed:int=None, path:PathOrStr='.', bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False) -> 'CollabDataBunch': "Create a `DataBunch` suitable for collaborative filtering from `ratings`." user_name = ifnone(user_name, ratings.columns[0]) item_name = ifnone(item_name, ratings.columns[1]) rating_name = ifnone(rating_name,ratings.columns[2]) cat_names = [user_name,item_name] src = (CollabList.from_df(ratings, cat_names=cat_names, procs=Categorify) .split_by_rand_pct(valid_pct=valid_pct, seed=seed).label_from_df(cols=rating_name)) if test is not None: src.add_test(CollabList.from_df(test, cat_names=cat_names)) return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check)
[ "def", "from_df", "(", "cls", ",", "ratings", ":", "DataFrame", ",", "valid_pct", ":", "float", "=", "0.2", ",", "user_name", ":", "Optional", "[", "str", "]", "=", "None", ",", "item_name", ":", "Optional", "[", "str", "]", "=", "None", ",", "rating_name", ":", "Optional", "[", "str", "]", "=", "None", ",", "test", ":", "DataFrame", "=", "None", ",", "seed", ":", "int", "=", "None", ",", "path", ":", "PathOrStr", "=", "'.'", ",", "bs", ":", "int", "=", "64", ",", "val_bs", ":", "int", "=", "None", ",", "num_workers", ":", "int", "=", "defaults", ".", "cpus", ",", "dl_tfms", ":", "Optional", "[", "Collection", "[", "Callable", "]", "]", "=", "None", ",", "device", ":", "torch", ".", "device", "=", "None", ",", "collate_fn", ":", "Callable", "=", "data_collate", ",", "no_check", ":", "bool", "=", "False", ")", "->", "'CollabDataBunch'", ":", "user_name", "=", "ifnone", "(", "user_name", ",", "ratings", ".", "columns", "[", "0", "]", ")", "item_name", "=", "ifnone", "(", "item_name", ",", "ratings", ".", "columns", "[", "1", "]", ")", "rating_name", "=", "ifnone", "(", "rating_name", ",", "ratings", ".", "columns", "[", "2", "]", ")", "cat_names", "=", "[", "user_name", ",", "item_name", "]", "src", "=", "(", "CollabList", ".", "from_df", "(", "ratings", ",", "cat_names", "=", "cat_names", ",", "procs", "=", "Categorify", ")", ".", "split_by_rand_pct", "(", "valid_pct", "=", "valid_pct", ",", "seed", "=", "seed", ")", ".", "label_from_df", "(", "cols", "=", "rating_name", ")", ")", "if", "test", "is", "not", "None", ":", "src", ".", "add_test", "(", "CollabList", ".", "from_df", "(", "test", ",", "cat_names", "=", "cat_names", ")", ")", "return", "src", ".", "databunch", "(", "path", "=", "path", ",", "bs", "=", "bs", ",", "val_bs", "=", "val_bs", ",", "num_workers", "=", "num_workers", ",", "device", "=", "device", ",", "collate_fn", "=", "collate_fn", ",", "no_check", "=", "no_check", ")" ]
Create a `DataBunch` suitable for collaborative filtering from `ratings`.
[ "Create", "a", "DataBunch", "suitable", "for", "collaborative", "filtering", "from", "ratings", "." ]
python
train
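A minimal usage sketch for the CollabDataBunch.from_df record above; the ratings DataFrame and its column order (user, item, rating) are made up for illustration, and the call mirrors the fastai v1 API shown in the record.

import pandas as pd
from fastai.collab import CollabDataBunch, collab_learner

# Toy ratings table: first column = user, second = item, third = rating,
# matching the defaults from_df falls back to when no column names are given.
ratings = pd.DataFrame({
    'user':   [1, 1, 2, 2, 3, 3],
    'movie':  [10, 20, 10, 30, 20, 30],
    'rating': [4.0, 3.5, 5.0, 2.0, 4.5, 3.0],
})

data = CollabDataBunch.from_df(ratings, valid_pct=0.2, seed=42, bs=4)
learn = collab_learner(data, n_factors=8, y_range=(0.5, 5.0))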
gbiggs/rtctree
rtctree/config_set.py
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/config_set.py#L71-L75
def _reload(self, object, description, data): '''Reload the configuration set data.''' self._object = object self._description = description self._data = data
[ "def", "_reload", "(", "self", ",", "object", ",", "description", ",", "data", ")", ":", "self", ".", "_object", "=", "object", "self", ".", "_description", "=", "description", "self", ".", "_data", "=", "data" ]
Reload the configuration set data.
[ "Reload", "the", "configuration", "set", "data", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/executable_builder.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/executable_builder.py#L166-L200
def assert_consistent_reg_options(exec_type, json_spec, executable_builder_exeception): """ Validates the "regionalOptions" field and verifies all the regions used in "regionalOptions" have the same options. """ reg_options_spec = json_spec.get('regionalOptions') json_fn = 'dxapp.json' if exec_type == 'app' else 'dxworkflow.json' if not isinstance(reg_options_spec, dict): raise executable_builder_exeception("The field 'regionalOptions' in must be a mapping") if not reg_options_spec: raise executable_builder_exeception( "The field 'regionalOptions' in " + json_fn + " must be a non-empty mapping") regional_options_list = list(reg_options_spec.items()) for region, opts_for_region in regional_options_list: if not isinstance(opts_for_region, dict): raise executable_builder_exeception("The field 'regionalOptions['" + region + "']' in " + json_fn + " must be a mapping") if set(opts_for_region.keys()) != set(regional_options_list[0][1].keys()): if set(opts_for_region.keys()) - set(regional_options_list[0][1].keys()): with_key, without_key = region, regional_options_list[0][0] key_name = next(iter(set(opts_for_region.keys()) - set(regional_options_list[0][1].keys()))) else: with_key, without_key = regional_options_list[0][0], region key_name = next(iter(set(regional_options_list[0][1].keys()) - set(opts_for_region.keys()))) raise executable_builder_exeception( "All regions in regionalOptions must specify the same options; " + "%s was given for %s but not for %s" % (key_name, with_key, without_key) ) if exec_type == 'app': for key in opts_for_region: if key in json_spec.get('runSpec', {}): raise executable_builder_exeception( key + " cannot be given in both runSpec and in regional options for " + region)
[ "def", "assert_consistent_reg_options", "(", "exec_type", ",", "json_spec", ",", "executable_builder_exeception", ")", ":", "reg_options_spec", "=", "json_spec", ".", "get", "(", "'regionalOptions'", ")", "json_fn", "=", "'dxapp.json'", "if", "exec_type", "==", "'app'", "else", "'dxworkflow.json'", "if", "not", "isinstance", "(", "reg_options_spec", ",", "dict", ")", ":", "raise", "executable_builder_exeception", "(", "\"The field 'regionalOptions' in must be a mapping\"", ")", "if", "not", "reg_options_spec", ":", "raise", "executable_builder_exeception", "(", "\"The field 'regionalOptions' in \"", "+", "json_fn", "+", "\" must be a non-empty mapping\"", ")", "regional_options_list", "=", "list", "(", "reg_options_spec", ".", "items", "(", ")", ")", "for", "region", ",", "opts_for_region", "in", "regional_options_list", ":", "if", "not", "isinstance", "(", "opts_for_region", ",", "dict", ")", ":", "raise", "executable_builder_exeception", "(", "\"The field 'regionalOptions['\"", "+", "region", "+", "\"']' in \"", "+", "json_fn", "+", "\" must be a mapping\"", ")", "if", "set", "(", "opts_for_region", ".", "keys", "(", ")", ")", "!=", "set", "(", "regional_options_list", "[", "0", "]", "[", "1", "]", ".", "keys", "(", ")", ")", ":", "if", "set", "(", "opts_for_region", ".", "keys", "(", ")", ")", "-", "set", "(", "regional_options_list", "[", "0", "]", "[", "1", "]", ".", "keys", "(", ")", ")", ":", "with_key", ",", "without_key", "=", "region", ",", "regional_options_list", "[", "0", "]", "[", "0", "]", "key_name", "=", "next", "(", "iter", "(", "set", "(", "opts_for_region", ".", "keys", "(", ")", ")", "-", "set", "(", "regional_options_list", "[", "0", "]", "[", "1", "]", ".", "keys", "(", ")", ")", ")", ")", "else", ":", "with_key", ",", "without_key", "=", "regional_options_list", "[", "0", "]", "[", "0", "]", ",", "region", "key_name", "=", "next", "(", "iter", "(", "set", "(", "regional_options_list", "[", "0", "]", "[", "1", "]", ".", "keys", "(", ")", ")", "-", "set", "(", "opts_for_region", ".", "keys", "(", ")", ")", ")", ")", "raise", "executable_builder_exeception", "(", "\"All regions in regionalOptions must specify the same options; \"", "+", "\"%s was given for %s but not for %s\"", "%", "(", "key_name", ",", "with_key", ",", "without_key", ")", ")", "if", "exec_type", "==", "'app'", ":", "for", "key", "in", "opts_for_region", ":", "if", "key", "in", "json_spec", ".", "get", "(", "'runSpec'", ",", "{", "}", ")", ":", "raise", "executable_builder_exeception", "(", "key", "+", "\" cannot be given in both runSpec and in regional options for \"", "+", "region", ")" ]
Validates the "regionalOptions" field and verifies all the regions used in "regionalOptions" have the same options.
[ "Validates", "the", "regionalOptions", "field", "and", "verifies", "all", "the", "regions", "used", "in", "regionalOptions", "have", "the", "same", "options", "." ]
python
train
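To illustrate the validation in the record above, here is a hedged sketch of a dxapp.json-style spec that passes the consistency check and one that fails it; the import path is inferred from the record's path field, and the exception class is a stand-in for whatever builder exception the real caller supplies.

from dxpy.executable_builder import assert_consistent_reg_options  # path inferred from the record

class BuilderError(Exception):   # stand-in for the caller's exception class
    pass

# Every region lists the same option keys -> passes quietly.
consistent_spec = {
    'regionalOptions': {
        'aws:us-east-1': {'systemRequirements': {'*': {'instanceType': 'mem1_ssd1_x4'}}},
        'azure:westus':  {'systemRequirements': {'*': {'instanceType': 'azure:mem1_ssd1_x4'}}},
    }
}
assert_consistent_reg_options('app', consistent_spec, BuilderError)

# 'azure:westus' is missing 'systemRequirements' -> the check raises,
# naming the key and the two regions that disagree.
inconsistent_spec = {
    'regionalOptions': {
        'aws:us-east-1': {'systemRequirements': {'*': {'instanceType': 'mem1_ssd1_x4'}}},
        'azure:westus':  {},
    }
}
# assert_consistent_reg_options('app', inconsistent_spec, BuilderError)  # would raise BuilderError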
dade-ai/snipy
snipy/plt/ploting.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L279-L287
def matshow(*args, **kwargs): """ imshow without interpolation like as matshow :param args: :param kwargs: :return: """ kwargs['interpolation'] = kwargs.pop('interpolation', 'none') return plt.imshow(*args, **kwargs)
[ "def", "matshow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'interpolation'", "]", "=", "kwargs", ".", "pop", "(", "'interpolation'", ",", "'none'", ")", "return", "plt", ".", "imshow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
imshow without interpolation like as matshow :param args: :param kwargs: :return:
[ "imshow", "without", "interpolation", "like", "as", "matshow", ":", "param", "args", ":", ":", "param", "kwargs", ":", ":", "return", ":" ]
python
valid
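A small usage sketch for the matshow wrapper above; since it just forwards to plt.imshow with interpolation defaulted to 'none', any 2-D array works. The import path is inferred from the record's path field.

import numpy as np
import matplotlib.pyplot as plt
from snipy.plt.ploting import matshow   # module path assumed from snipy/plt/ploting.py

mat = np.random.rand(8, 8)
matshow(mat, cmap='gray')   # rendered without interpolation, like plt.matshow
plt.colorbar()
plt.show()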
aio-libs/yarl
yarl/__init__.py
https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L435-L453
def host(self): """Decoded host part of URL. None for relative URLs. """ raw = self.raw_host if raw is None: return None if "%" in raw: # Hack for scoped IPv6 addresses like # fe80::2%Проверка # presence of '%' sign means only IPv6 address, so idna is useless. return raw try: return idna.decode(raw.encode("ascii")) except UnicodeError: # e.g. '::1' return raw.encode("ascii").decode("idna")
[ "def", "host", "(", "self", ")", ":", "raw", "=", "self", ".", "raw_host", "if", "raw", "is", "None", ":", "return", "None", "if", "\"%\"", "in", "raw", ":", "# Hack for scoped IPv6 addresses like", "# fe80::2%Проверка", "# presence of '%' sign means only IPv6 address, so idna is useless.", "return", "raw", "try", ":", "return", "idna", ".", "decode", "(", "raw", ".", "encode", "(", "\"ascii\"", ")", ")", "except", "UnicodeError", ":", "# e.g. '::1'", "return", "raw", ".", "encode", "(", "\"ascii\"", ")", ".", "decode", "(", "\"idna\"", ")" ]
Decoded host part of URL. None for relative URLs.
[ "Decoded", "host", "part", "of", "URL", "." ]
python
train
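A brief usage sketch for the host property above; the internationalized domain is only an illustration of the IDNA decoding branch, and relative URLs give None as the docstring states.

from yarl import URL

print(URL('https://example.com/path').host)        # 'example.com'
print(URL('http://xn--e1afmkfd.xn--p1ai/').host)   # punycode host decoded back to its Unicode form
print(URL('/relative/path').host)                  # None for relative URLs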
andychase/reparse
reparse/expression.py
https://github.com/andychase/reparse/blob/5f46cdd0fc4e239c0ddeca4b542e48a5ae95c508/reparse/expression.py#L108-L121
def AlternatesGroup(expressions, final_function, name=""): """ Group expressions using the OR character ``|`` >>> from collections import namedtuple >>> expr = namedtuple('expr', 'regex group_lengths run')('(1)', [1], None) >>> grouping = AlternatesGroup([expr, expr], lambda f: None, 'yeah') >>> grouping.regex # doctest: +IGNORE_UNICODE '(?:(1))|(?:(1))' >>> grouping.group_lengths [1, 1] """ inbetweens = ["|"] * (len(expressions) + 1) inbetweens[0] = "" inbetweens[-1] = "" return Group(expressions, final_function, inbetweens, name)
[ "def", "AlternatesGroup", "(", "expressions", ",", "final_function", ",", "name", "=", "\"\"", ")", ":", "inbetweens", "=", "[", "\"|\"", "]", "*", "(", "len", "(", "expressions", ")", "+", "1", ")", "inbetweens", "[", "0", "]", "=", "\"\"", "inbetweens", "[", "-", "1", "]", "=", "\"\"", "return", "Group", "(", "expressions", ",", "final_function", ",", "inbetweens", ",", "name", ")" ]
Group expressions using the OR character ``|`` >>> from collections import namedtuple >>> expr = namedtuple('expr', 'regex group_lengths run')('(1)', [1], None) >>> grouping = AlternatesGroup([expr, expr], lambda f: None, 'yeah') >>> grouping.regex # doctest: +IGNORE_UNICODE '(?:(1))|(?:(1))' >>> grouping.group_lengths [1, 1]
[ "Group", "expressions", "using", "the", "OR", "character", "|", ">>>", "from", "collections", "import", "namedtuple", ">>>", "expr", "=", "namedtuple", "(", "expr", "regex", "group_lengths", "run", ")", "(", "(", "1", ")", "[", "1", "]", "None", ")", ">>>", "grouping", "=", "AlternatesGroup", "(", "[", "expr", "expr", "]", "lambda", "f", ":", "None", "yeah", ")", ">>>", "grouping", ".", "regex", "#", "doctest", ":", "+", "IGNORE_UNICODE", "(", "?", ":", "(", "1", "))", "|", "(", "?", ":", "(", "1", "))", ">>>", "grouping", ".", "group_lengths", "[", "1", "1", "]" ]
python
train
sixty-north/cosmic-ray
src/cosmic_ray/operators/zero_iteration_for_loop.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/operators/zero_iteration_for_loop.py#L17-L24
def mutate(self, node, index): """Modify the For loop to evaluate to None""" assert index == 0 assert isinstance(node, ForStmt) empty_list = parso.parse(' []') node.children[3] = empty_list return node
[ "def", "mutate", "(", "self", ",", "node", ",", "index", ")", ":", "assert", "index", "==", "0", "assert", "isinstance", "(", "node", ",", "ForStmt", ")", "empty_list", "=", "parso", ".", "parse", "(", "' []'", ")", "node", ".", "children", "[", "3", "]", "=", "empty_list", "return", "node" ]
Modify the For loop to evaluate to None
[ "Modify", "the", "For", "loop", "to", "evaluate", "to", "None" ]
python
train
marcomusy/vtkplotter
vtkplotter/colors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/colors.py#L758-L874
def printHistogram(data, bins=10, height=10, logscale=False, minbin=0, horizontal=False, char=u"\U00002589", c=None, bold=True, title='Histogram'): """ Ascii histogram printing. :param int bins: number of histogram bins :param int height: height of the histogram in character units :param bool logscale: use logscale for frequencies :param int minbin: ignore bins before minbin :param bool horizontal: show histogram horizontally :param str char: character to be used :param str,int c: ascii color :param bool char: use boldface :param str title: histogram title :Example: .. code-block:: python from vtkplotter import printHistogram import numpy as np d = np.random.normal(size=1000) printHistogram(d, c='blue', logscale=True, title='my scalars') printHistogram(d, c=1, horizontal=1) |printhisto| """ # Adapted from http://pyinsci.blogspot.com/2009/10/ascii-histograms.html if not horizontal: # better aspect ratio bins *= 2 isimg = isinstance(data, vtk.vtkImageData) isvol = isinstance(data, vtk.vtkVolume) if isimg or isvol: if isvol: img = data.image else: img = data dims = img.GetDimensions() nvx = min(100000, dims[0]*dims[1]*dims[2]) idxs = np.random.randint(0, min(dims), size=(nvx, 3)) data = [] for ix, iy, iz in idxs: d = img.GetScalarComponentAsFloat(ix, iy, iz, 0) data.append(d) elif isinstance(data, vtk.vtkActor): arr = data.polydata().GetPointData().GetScalars() if not arr: arr = data.polydata().GetCellData().GetScalars() if not arr: return from vtk.util.numpy_support import vtk_to_numpy data = vtk_to_numpy(arr) h = np.histogram(data, bins=bins) if minbin: hi = h[0][minbin:-1] else: hi = h[0] if sys.version_info[0] < 3 and char == u"\U00002589": char = "*" # python2 hack if char == u"\U00002589" and horizontal: char = u"\U00002586" entrs = "\t(entries=" + str(len(data)) + ")" if logscale: h0 = np.log10(hi+1) maxh0 = int(max(h0)*100)/100 title = '(logscale) ' + title + entrs else: h0 = hi maxh0 = max(h0) title = title + entrs def _v(): his = "" if title: his += title +"\n" bars = h0 / maxh0 * height for l in reversed(range(1, height + 1)): line = "" if l == height: line = "%s " % maxh0 else: line = " |" + " " * (len(str(maxh0))-3) for c in bars: if c >= np.ceil(l): line += char else: line += " " line += "\n" his += line his += "%.2f" % h[1][0] + "." * (bins) + "%.2f" % h[1][-1] + "\n" return his def _h(): his = "" if title: his += title +"\n" xl = ["%.2f" % n for n in h[1]] lxl = [len(l) for l in xl] bars = h0 / maxh0 * height his += " " * int(max(bars) + 2 + max(lxl)) + "%s\n" % maxh0 for i, c in enumerate(bars): line = (xl[i] + " " * int(max(lxl) - lxl[i]) + "| " + char * int(c) + "\n") his += line return his if horizontal: height *= 2 printc(_h(), c=c, bold=bold) else: printc(_v(), c=c, bold=bold)
[ "def", "printHistogram", "(", "data", ",", "bins", "=", "10", ",", "height", "=", "10", ",", "logscale", "=", "False", ",", "minbin", "=", "0", ",", "horizontal", "=", "False", ",", "char", "=", "u\"\\U00002589\"", ",", "c", "=", "None", ",", "bold", "=", "True", ",", "title", "=", "'Histogram'", ")", ":", "# Adapted from http://pyinsci.blogspot.com/2009/10/ascii-histograms.html", "if", "not", "horizontal", ":", "# better aspect ratio", "bins", "*=", "2", "isimg", "=", "isinstance", "(", "data", ",", "vtk", ".", "vtkImageData", ")", "isvol", "=", "isinstance", "(", "data", ",", "vtk", ".", "vtkVolume", ")", "if", "isimg", "or", "isvol", ":", "if", "isvol", ":", "img", "=", "data", ".", "image", "else", ":", "img", "=", "data", "dims", "=", "img", ".", "GetDimensions", "(", ")", "nvx", "=", "min", "(", "100000", ",", "dims", "[", "0", "]", "*", "dims", "[", "1", "]", "*", "dims", "[", "2", "]", ")", "idxs", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "min", "(", "dims", ")", ",", "size", "=", "(", "nvx", ",", "3", ")", ")", "data", "=", "[", "]", "for", "ix", ",", "iy", ",", "iz", "in", "idxs", ":", "d", "=", "img", ".", "GetScalarComponentAsFloat", "(", "ix", ",", "iy", ",", "iz", ",", "0", ")", "data", ".", "append", "(", "d", ")", "elif", "isinstance", "(", "data", ",", "vtk", ".", "vtkActor", ")", ":", "arr", "=", "data", ".", "polydata", "(", ")", ".", "GetPointData", "(", ")", ".", "GetScalars", "(", ")", "if", "not", "arr", ":", "arr", "=", "data", ".", "polydata", "(", ")", ".", "GetCellData", "(", ")", ".", "GetScalars", "(", ")", "if", "not", "arr", ":", "return", "from", "vtk", ".", "util", ".", "numpy_support", "import", "vtk_to_numpy", "data", "=", "vtk_to_numpy", "(", "arr", ")", "h", "=", "np", ".", "histogram", "(", "data", ",", "bins", "=", "bins", ")", "if", "minbin", ":", "hi", "=", "h", "[", "0", "]", "[", "minbin", ":", "-", "1", "]", "else", ":", "hi", "=", "h", "[", "0", "]", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", "and", "char", "==", "u\"\\U00002589\"", ":", "char", "=", "\"*\"", "# python2 hack", "if", "char", "==", "u\"\\U00002589\"", "and", "horizontal", ":", "char", "=", "u\"\\U00002586\"", "entrs", "=", "\"\\t(entries=\"", "+", "str", "(", "len", "(", "data", ")", ")", "+", "\")\"", "if", "logscale", ":", "h0", "=", "np", ".", "log10", "(", "hi", "+", "1", ")", "maxh0", "=", "int", "(", "max", "(", "h0", ")", "*", "100", ")", "/", "100", "title", "=", "'(logscale) '", "+", "title", "+", "entrs", "else", ":", "h0", "=", "hi", "maxh0", "=", "max", "(", "h0", ")", "title", "=", "title", "+", "entrs", "def", "_v", "(", ")", ":", "his", "=", "\"\"", "if", "title", ":", "his", "+=", "title", "+", "\"\\n\"", "bars", "=", "h0", "/", "maxh0", "*", "height", "for", "l", "in", "reversed", "(", "range", "(", "1", ",", "height", "+", "1", ")", ")", ":", "line", "=", "\"\"", "if", "l", "==", "height", ":", "line", "=", "\"%s \"", "%", "maxh0", "else", ":", "line", "=", "\" |\"", "+", "\" \"", "*", "(", "len", "(", "str", "(", "maxh0", ")", ")", "-", "3", ")", "for", "c", "in", "bars", ":", "if", "c", ">=", "np", ".", "ceil", "(", "l", ")", ":", "line", "+=", "char", "else", ":", "line", "+=", "\" \"", "line", "+=", "\"\\n\"", "his", "+=", "line", "his", "+=", "\"%.2f\"", "%", "h", "[", "1", "]", "[", "0", "]", "+", "\".\"", "*", "(", "bins", ")", "+", "\"%.2f\"", "%", "h", "[", "1", "]", "[", "-", "1", "]", "+", "\"\\n\"", "return", "his", "def", "_h", "(", ")", ":", "his", "=", "\"\"", "if", "title", ":", "his", "+=", "title", "+", 
"\"\\n\"", "xl", "=", "[", "\"%.2f\"", "%", "n", "for", "n", "in", "h", "[", "1", "]", "]", "lxl", "=", "[", "len", "(", "l", ")", "for", "l", "in", "xl", "]", "bars", "=", "h0", "/", "maxh0", "*", "height", "his", "+=", "\" \"", "*", "int", "(", "max", "(", "bars", ")", "+", "2", "+", "max", "(", "lxl", ")", ")", "+", "\"%s\\n\"", "%", "maxh0", "for", "i", ",", "c", "in", "enumerate", "(", "bars", ")", ":", "line", "=", "(", "xl", "[", "i", "]", "+", "\" \"", "*", "int", "(", "max", "(", "lxl", ")", "-", "lxl", "[", "i", "]", ")", "+", "\"| \"", "+", "char", "*", "int", "(", "c", ")", "+", "\"\\n\"", ")", "his", "+=", "line", "return", "his", "if", "horizontal", ":", "height", "*=", "2", "printc", "(", "_h", "(", ")", ",", "c", "=", "c", ",", "bold", "=", "bold", ")", "else", ":", "printc", "(", "_v", "(", ")", ",", "c", "=", "c", ",", "bold", "=", "bold", ")" ]
Ascii histogram printing. :param int bins: number of histogram bins :param int height: height of the histogram in character units :param bool logscale: use logscale for frequencies :param int minbin: ignore bins before minbin :param bool horizontal: show histogram horizontally :param str char: character to be used :param str,int c: ascii color :param bool char: use boldface :param str title: histogram title :Example: .. code-block:: python from vtkplotter import printHistogram import numpy as np d = np.random.normal(size=1000) printHistogram(d, c='blue', logscale=True, title='my scalars') printHistogram(d, c=1, horizontal=1) |printhisto|
[ "Ascii", "histogram", "printing", "." ]
python
train
apache/incubator-superset
superset/connectors/base/models.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/base/models.py#L166-L215
def data(self): """Data representation of the datasource sent to the frontend""" order_by_choices = [] # self.column_names return sorted column_names for s in self.column_names: s = str(s or '') order_by_choices.append((json.dumps([s, True]), s + ' [asc]')) order_by_choices.append((json.dumps([s, False]), s + ' [desc]')) verbose_map = {'__timestamp': 'Time'} verbose_map.update({ o.metric_name: o.verbose_name or o.metric_name for o in self.metrics }) verbose_map.update({ o.column_name: o.verbose_name or o.column_name for o in self.columns }) return { # simple fields 'id': self.id, 'column_formats': self.column_formats, 'description': self.description, 'database': self.database.data, # pylint: disable=no-member 'default_endpoint': self.default_endpoint, 'filter_select': self.filter_select_enabled, # TODO deprecate 'filter_select_enabled': self.filter_select_enabled, 'name': self.name, 'datasource_name': self.datasource_name, 'type': self.type, 'schema': self.schema, 'offset': self.offset, 'cache_timeout': self.cache_timeout, 'params': self.params, 'perm': self.perm, 'edit_url': self.url, # sqla-specific 'sql': self.sql, # one to many 'columns': [o.data for o in self.columns], 'metrics': [o.data for o in self.metrics], # TODO deprecate, move logic to JS 'order_by_choices': order_by_choices, 'owners': [owner.id for owner in self.owners], 'verbose_map': verbose_map, 'select_star': self.select_star, }
[ "def", "data", "(", "self", ")", ":", "order_by_choices", "=", "[", "]", "# self.column_names return sorted column_names", "for", "s", "in", "self", ".", "column_names", ":", "s", "=", "str", "(", "s", "or", "''", ")", "order_by_choices", ".", "append", "(", "(", "json", ".", "dumps", "(", "[", "s", ",", "True", "]", ")", ",", "s", "+", "' [asc]'", ")", ")", "order_by_choices", ".", "append", "(", "(", "json", ".", "dumps", "(", "[", "s", ",", "False", "]", ")", ",", "s", "+", "' [desc]'", ")", ")", "verbose_map", "=", "{", "'__timestamp'", ":", "'Time'", "}", "verbose_map", ".", "update", "(", "{", "o", ".", "metric_name", ":", "o", ".", "verbose_name", "or", "o", ".", "metric_name", "for", "o", "in", "self", ".", "metrics", "}", ")", "verbose_map", ".", "update", "(", "{", "o", ".", "column_name", ":", "o", ".", "verbose_name", "or", "o", ".", "column_name", "for", "o", "in", "self", ".", "columns", "}", ")", "return", "{", "# simple fields", "'id'", ":", "self", ".", "id", ",", "'column_formats'", ":", "self", ".", "column_formats", ",", "'description'", ":", "self", ".", "description", ",", "'database'", ":", "self", ".", "database", ".", "data", ",", "# pylint: disable=no-member", "'default_endpoint'", ":", "self", ".", "default_endpoint", ",", "'filter_select'", ":", "self", ".", "filter_select_enabled", ",", "# TODO deprecate", "'filter_select_enabled'", ":", "self", ".", "filter_select_enabled", ",", "'name'", ":", "self", ".", "name", ",", "'datasource_name'", ":", "self", ".", "datasource_name", ",", "'type'", ":", "self", ".", "type", ",", "'schema'", ":", "self", ".", "schema", ",", "'offset'", ":", "self", ".", "offset", ",", "'cache_timeout'", ":", "self", ".", "cache_timeout", ",", "'params'", ":", "self", ".", "params", ",", "'perm'", ":", "self", ".", "perm", ",", "'edit_url'", ":", "self", ".", "url", ",", "# sqla-specific", "'sql'", ":", "self", ".", "sql", ",", "# one to many", "'columns'", ":", "[", "o", ".", "data", "for", "o", "in", "self", ".", "columns", "]", ",", "'metrics'", ":", "[", "o", ".", "data", "for", "o", "in", "self", ".", "metrics", "]", ",", "# TODO deprecate, move logic to JS", "'order_by_choices'", ":", "order_by_choices", ",", "'owners'", ":", "[", "owner", ".", "id", "for", "owner", "in", "self", ".", "owners", "]", ",", "'verbose_map'", ":", "verbose_map", ",", "'select_star'", ":", "self", ".", "select_star", ",", "}" ]
Data representation of the datasource sent to the frontend
[ "Data", "representation", "of", "the", "datasource", "sent", "to", "the", "frontend" ]
python
train
dmwm/DBS
Server/Python/src/dbs/business/DBSBlockInsert.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSBlockInsert.py#L430-L562
def insertOutputModuleConfig(self, remoteConfig, migration=False): """ Insert Release version, application, parameter set hashes and the map(output module config). """ otptIdList = [] missingList = [] conn = self.dbi.connection() try: for c in remoteConfig: cfgid = self.otptModCfgid.execute(conn, app = c["app_name"], release_version = c["release_version"], pset_hash = c["pset_hash"], output_label = c["output_module_label"], global_tag=c['global_tag']) if cfgid <= 0 : missingList.append(c) else: key = (c['app_name'] + ':' + c['release_version'] + ':' + c['pset_hash'] + ':' + c['output_module_label'] + ':' + c['global_tag']) self.datasetCache['conf'][key] = cfgid otptIdList.append(cfgid) #print "About to set cfgid: %s" % str(cfgid) except KeyError as ex: if conn:conn.close() dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \ KeyError exception: %s. " %ex.args[0], self.logger.exception, "DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. " %ex.args[0] ) except Exception as ex: if conn:conn.close() raise if len(missingList)==0: if conn:conn.close() return otptIdList #Now insert the missing configs try: #tran = conn.begin() for m in missingList: # Start a new transaction # This is to see if we can get better results # by committing early if we're submitting # multiple blocks with similar features tran = conn.begin() #Now insert the config # Sort out the mess # We're having some problems with different threads # committing different pieces at the same time # This makes the output module config ID wrong # Trying to catch this via exception handling on duplication # Start a new transaction #global_tag is now required. YG 03/08/2011 try: cfgid = 0 if not migration: m['create_by'] = dbsUtils().getCreateBy() m['creation_date'] = dbsUtils().getTime() configObj = {"release_version": m["release_version"], "pset_hash": m["pset_hash"], "pset_name":m.get('pset_name', None), "app_name": m["app_name"], 'output_module_label' : m['output_module_label'], 'global_tag' : m['global_tag'], 'scenario' : m.get('scenario', None), 'creation_date' : m['creation_date'], 'create_by':m['create_by'] } self.otptModCfgin.execute(conn, configObj, tran) tran.commit() tran = None except KeyError as ex: if tran:tran.rollback() if conn:conn.close() dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \ KeyError exception: %s. " %ex.args[0], self.logger.exception, "DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. " %ex.args[0]) except exceptions.IntegrityError as ex: #Another job inserted it just 1/100000 second earlier than #you!! YG 11/17/2010 if str(ex).find("ORA-00001") != -1 or str(ex).lower().find("duplicate") !=-1: if str(ex).find("TUC_OMC_1") != -1: #the config is already in db, get the ID later pass else: #reinsert it if one or two or three of the three attributes (vresion, hash and app) are inserted #just 1/100000 second eailer. try: self.otptModCfgin.execute(conn, configObj, tran) tran.commit() tran = None except exceptions.IntegrityError as ex: if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_OMC_1"))\ or str(ex).lower().find("duplicate") != -1: pass else: if tran:tran.rollback() if conn:conn.close() dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert Configure. ', self.logger.exception, 'Invalid data when insert Configure. 
'+ str(ex)) elif str(ex).find("ORA-01400") > -1: if tran:tran.rollback() if conn:conn.close() dbsExceptionHandler("dbsException-missing-data", "Missing data when inserting Configure. ", self.logger.exception, str(ex)) else: if tran:tran.rollback() if conn:conn.close() dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert Configure. ', self.logger.exception, 'Invalid data when insert Configure. '+ str(ex)) except exceptions as ex3: if tran:tran.rollback() if conn:conn.close() raise ex3 cfgid = self.otptModCfgid.execute(conn, app = m["app_name"], release_version = m["release_version"], pset_hash = m["pset_hash"], output_label = m["output_module_label"], global_tag=m['global_tag']) otptIdList.append(cfgid) key = (m['app_name'] + ':' + m['release_version'] + ':' + m['pset_hash'] + ':' +m['output_module_label'] + ':' + m['global_tag']) self.datasetCache['conf'][key] = cfgid finally: if tran:tran.rollback() if conn:conn.close() return otptIdList
[ "def", "insertOutputModuleConfig", "(", "self", ",", "remoteConfig", ",", "migration", "=", "False", ")", ":", "otptIdList", "=", "[", "]", "missingList", "=", "[", "]", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "try", ":", "for", "c", "in", "remoteConfig", ":", "cfgid", "=", "self", ".", "otptModCfgid", ".", "execute", "(", "conn", ",", "app", "=", "c", "[", "\"app_name\"", "]", ",", "release_version", "=", "c", "[", "\"release_version\"", "]", ",", "pset_hash", "=", "c", "[", "\"pset_hash\"", "]", ",", "output_label", "=", "c", "[", "\"output_module_label\"", "]", ",", "global_tag", "=", "c", "[", "'global_tag'", "]", ")", "if", "cfgid", "<=", "0", ":", "missingList", ".", "append", "(", "c", ")", "else", ":", "key", "=", "(", "c", "[", "'app_name'", "]", "+", "':'", "+", "c", "[", "'release_version'", "]", "+", "':'", "+", "c", "[", "'pset_hash'", "]", "+", "':'", "+", "c", "[", "'output_module_label'", "]", "+", "':'", "+", "c", "[", "'global_tag'", "]", ")", "self", ".", "datasetCache", "[", "'conf'", "]", "[", "key", "]", "=", "cfgid", "otptIdList", ".", "append", "(", "cfgid", ")", "#print \"About to set cfgid: %s\" % str(cfgid)", "except", "KeyError", "as", "ex", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"DBSBlockInsert/insertOutputModuleConfig: \\\n KeyError exception: %s. \"", "%", "ex", ".", "args", "[", "0", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. \"", "%", "ex", ".", "args", "[", "0", "]", ")", "except", "Exception", "as", "ex", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "raise", "if", "len", "(", "missingList", ")", "==", "0", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "return", "otptIdList", "#Now insert the missing configs", "try", ":", "#tran = conn.begin()", "for", "m", "in", "missingList", ":", "# Start a new transaction", "# This is to see if we can get better results", "# by committing early if we're submitting", "# multiple blocks with similar features", "tran", "=", "conn", ".", "begin", "(", ")", "#Now insert the config", "# Sort out the mess", "# We're having some problems with different threads", "# committing different pieces at the same time", "# This makes the output module config ID wrong", "# Trying to catch this via exception handling on duplication", "# Start a new transaction", "#global_tag is now required. 
YG 03/08/2011", "try", ":", "cfgid", "=", "0", "if", "not", "migration", ":", "m", "[", "'create_by'", "]", "=", "dbsUtils", "(", ")", ".", "getCreateBy", "(", ")", "m", "[", "'creation_date'", "]", "=", "dbsUtils", "(", ")", ".", "getTime", "(", ")", "configObj", "=", "{", "\"release_version\"", ":", "m", "[", "\"release_version\"", "]", ",", "\"pset_hash\"", ":", "m", "[", "\"pset_hash\"", "]", ",", "\"pset_name\"", ":", "m", ".", "get", "(", "'pset_name'", ",", "None", ")", ",", "\"app_name\"", ":", "m", "[", "\"app_name\"", "]", ",", "'output_module_label'", ":", "m", "[", "'output_module_label'", "]", ",", "'global_tag'", ":", "m", "[", "'global_tag'", "]", ",", "'scenario'", ":", "m", ".", "get", "(", "'scenario'", ",", "None", ")", ",", "'creation_date'", ":", "m", "[", "'creation_date'", "]", ",", "'create_by'", ":", "m", "[", "'create_by'", "]", "}", "self", ".", "otptModCfgin", ".", "execute", "(", "conn", ",", "configObj", ",", "tran", ")", "tran", ".", "commit", "(", ")", "tran", "=", "None", "except", "KeyError", "as", "ex", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"DBSBlockInsert/insertOutputModuleConfig: \\\n KeyError exception: %s. \"", "%", "ex", ".", "args", "[", "0", "]", ",", "self", ".", "logger", ".", "exception", ",", "\"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. \"", "%", "ex", ".", "args", "[", "0", "]", ")", "except", "exceptions", ".", "IntegrityError", "as", "ex", ":", "#Another job inserted it just 1/100000 second earlier than", "#you!! YG 11/17/2010", "if", "str", "(", "ex", ")", ".", "find", "(", "\"ORA-00001\"", ")", "!=", "-", "1", "or", "str", "(", "ex", ")", ".", "lower", "(", ")", ".", "find", "(", "\"duplicate\"", ")", "!=", "-", "1", ":", "if", "str", "(", "ex", ")", ".", "find", "(", "\"TUC_OMC_1\"", ")", "!=", "-", "1", ":", "#the config is already in db, get the ID later", "pass", "else", ":", "#reinsert it if one or two or three of the three attributes (vresion, hash and app) are inserted", "#just 1/100000 second eailer.", "try", ":", "self", ".", "otptModCfgin", ".", "execute", "(", "conn", ",", "configObj", ",", "tran", ")", "tran", ".", "commit", "(", ")", "tran", "=", "None", "except", "exceptions", ".", "IntegrityError", "as", "ex", ":", "if", "(", "str", "(", "ex", ")", ".", "find", "(", "\"ORA-00001\"", ")", "!=", "-", "1", "and", "str", "(", "ex", ")", ".", "find", "(", "\"TUC_OMC_1\"", ")", ")", "or", "str", "(", "ex", ")", ".", "lower", "(", ")", ".", "find", "(", "\"duplicate\"", ")", "!=", "-", "1", ":", "pass", "else", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")", "dbsExceptionHandler", "(", "'dbsException-invalid-input2'", ",", "'Invalid data when insert Configure. '", ",", "self", ".", "logger", ".", "exception", ",", "'Invalid data when insert Configure. '", "+", "str", "(", "ex", ")", ")", "elif", "str", "(", "ex", ")", ".", "find", "(", "\"ORA-01400\"", ")", ">", "-", "1", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")", "dbsExceptionHandler", "(", "\"dbsException-missing-data\"", ",", "\"Missing data when inserting Configure. 
\"", ",", "self", ".", "logger", ".", "exception", ",", "str", "(", "ex", ")", ")", "else", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")", "dbsExceptionHandler", "(", "'dbsException-invalid-input2'", ",", "'Invalid data when insert Configure. '", ",", "self", ".", "logger", ".", "exception", ",", "'Invalid data when insert Configure. '", "+", "str", "(", "ex", ")", ")", "except", "exceptions", "as", "ex3", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")", "raise", "ex3", "cfgid", "=", "self", ".", "otptModCfgid", ".", "execute", "(", "conn", ",", "app", "=", "m", "[", "\"app_name\"", "]", ",", "release_version", "=", "m", "[", "\"release_version\"", "]", ",", "pset_hash", "=", "m", "[", "\"pset_hash\"", "]", ",", "output_label", "=", "m", "[", "\"output_module_label\"", "]", ",", "global_tag", "=", "m", "[", "'global_tag'", "]", ")", "otptIdList", ".", "append", "(", "cfgid", ")", "key", "=", "(", "m", "[", "'app_name'", "]", "+", "':'", "+", "m", "[", "'release_version'", "]", "+", "':'", "+", "m", "[", "'pset_hash'", "]", "+", "':'", "+", "m", "[", "'output_module_label'", "]", "+", "':'", "+", "m", "[", "'global_tag'", "]", ")", "self", ".", "datasetCache", "[", "'conf'", "]", "[", "key", "]", "=", "cfgid", "finally", ":", "if", "tran", ":", "tran", ".", "rollback", "(", ")", "if", "conn", ":", "conn", ".", "close", "(", ")", "return", "otptIdList" ]
Insert Release version, application, parameter set hashes and the map(output module config).
[ "Insert", "Release", "version", "application", "parameter", "set", "hashes", "and", "the", "map", "(", "output", "module", "config", ")", "." ]
python
train
gmr/rejected
rejected/mcp.py
https://github.com/gmr/rejected/blob/610a3e1401122ecb98d891b6795cca0255e5b044/rejected/mcp.py#L535-L561
def remove_consumer_process(self, consumer, name): """Remove all details for the specified consumer and process name. :param str consumer: The consumer name :param str name: The process name """ my_pid = os.getpid() if name in self.consumers[consumer].processes.keys(): child = self.consumers[consumer].processes[name] try: alive = child.is_alive() except AssertionError: LOGGER.debug('Tried to test non-child process (%r to %r)', os.getpid(), child.pid) else: if child.pid == my_pid: LOGGER.debug('Child has my pid? %r, %r', my_pid, child.pid) elif alive: try: child.terminate() except OSError: pass try: del self.consumers[consumer].processes[name] except KeyError: pass
[ "def", "remove_consumer_process", "(", "self", ",", "consumer", ",", "name", ")", ":", "my_pid", "=", "os", ".", "getpid", "(", ")", "if", "name", "in", "self", ".", "consumers", "[", "consumer", "]", ".", "processes", ".", "keys", "(", ")", ":", "child", "=", "self", ".", "consumers", "[", "consumer", "]", ".", "processes", "[", "name", "]", "try", ":", "alive", "=", "child", ".", "is_alive", "(", ")", "except", "AssertionError", ":", "LOGGER", ".", "debug", "(", "'Tried to test non-child process (%r to %r)'", ",", "os", ".", "getpid", "(", ")", ",", "child", ".", "pid", ")", "else", ":", "if", "child", ".", "pid", "==", "my_pid", ":", "LOGGER", ".", "debug", "(", "'Child has my pid? %r, %r'", ",", "my_pid", ",", "child", ".", "pid", ")", "elif", "alive", ":", "try", ":", "child", ".", "terminate", "(", ")", "except", "OSError", ":", "pass", "try", ":", "del", "self", ".", "consumers", "[", "consumer", "]", ".", "processes", "[", "name", "]", "except", "KeyError", ":", "pass" ]
Remove all details for the specified consumer and process name. :param str consumer: The consumer name :param str name: The process name
[ "Remove", "all", "details", "for", "the", "specified", "consumer", "and", "process", "name", "." ]
python
train
pandas-dev/pandas
pandas/core/dtypes/concat.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L18-L56
def get_dtype_kinds(l): """ Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays """ typs = set() for arr in l: dtype = arr.dtype if is_categorical_dtype(dtype): typ = 'category' elif is_sparse(arr): typ = 'sparse' elif isinstance(arr, ABCRangeIndex): typ = 'range' elif is_datetime64tz_dtype(arr): # if to_concat contains different tz, # the result must be object dtype typ = str(arr.dtype) elif is_datetime64_dtype(dtype): typ = 'datetime' elif is_timedelta64_dtype(dtype): typ = 'timedelta' elif is_object_dtype(dtype): typ = 'object' elif is_bool_dtype(dtype): typ = 'bool' elif is_extension_array_dtype(dtype): typ = str(arr.dtype) else: typ = dtype.kind typs.add(typ) return typs
[ "def", "get_dtype_kinds", "(", "l", ")", ":", "typs", "=", "set", "(", ")", "for", "arr", "in", "l", ":", "dtype", "=", "arr", ".", "dtype", "if", "is_categorical_dtype", "(", "dtype", ")", ":", "typ", "=", "'category'", "elif", "is_sparse", "(", "arr", ")", ":", "typ", "=", "'sparse'", "elif", "isinstance", "(", "arr", ",", "ABCRangeIndex", ")", ":", "typ", "=", "'range'", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "# if to_concat contains different tz,", "# the result must be object dtype", "typ", "=", "str", "(", "arr", ".", "dtype", ")", "elif", "is_datetime64_dtype", "(", "dtype", ")", ":", "typ", "=", "'datetime'", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "typ", "=", "'timedelta'", "elif", "is_object_dtype", "(", "dtype", ")", ":", "typ", "=", "'object'", "elif", "is_bool_dtype", "(", "dtype", ")", ":", "typ", "=", "'bool'", "elif", "is_extension_array_dtype", "(", "dtype", ")", ":", "typ", "=", "str", "(", "arr", ".", "dtype", ")", "else", ":", "typ", "=", "dtype", ".", "kind", "typs", ".", "add", "(", "typ", ")", "return", "typs" ]
Parameters ---------- l : list of arrays Returns ------- a set of kinds that exist in this list of arrays
[ "Parameters", "----------", "l", ":", "list", "of", "arrays" ]
python
train
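A hedged illustration of the helper above; get_dtype_kinds is an internal pandas function (pandas.core.dtypes.concat in the version this record was taken from), so the import is not part of the public API.

import numpy as np
import pandas as pd
from pandas.core.dtypes.concat import get_dtype_kinds   # internal helper, per the record's path

arrays = [
    np.array([1, 2, 3]),                 # integer -> kind 'i'
    np.array([1.0, 2.0]),                # float   -> kind 'f'
    np.array(['a', 'b'], dtype=object),  # object  -> 'object'
    pd.Categorical(['x', 'y']),          # categorical -> 'category'
]
print(get_dtype_kinds(arrays))   # e.g. {'i', 'f', 'object', 'category'}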
PredixDev/predixpy
predix/data/asset.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/data/asset.py#L132-L140
def put_collection(self, collection, body): """ Updates an existing collection. The collection being updated *is* expected to include the id. """ uri = self.uri + '/v1' + collection return self.service._put(uri, body)
[ "def", "put_collection", "(", "self", ",", "collection", ",", "body", ")", ":", "uri", "=", "self", ".", "uri", "+", "'/v1'", "+", "collection", "return", "self", ".", "service", ".", "_put", "(", "uri", ",", "body", ")" ]
Updates an existing collection. The collection being updated *is* expected to include the id.
[ "Updates", "an", "existing", "collection", "." ]
python
train
urinieto/msaf
msaf/algorithms/fmc2d/xmeans.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L18-L82
def estimate_K_xmeans(self, th=0.2, maxK = 10): """Estimates K running X-means algorithm (Pelleg & Moore, 2000).""" # Run initial K-means means, labels = self.run_kmeans(self.X, self.init_K) # Run X-means algorithm stop = False curr_K = self.init_K while not stop: stop = True final_means = [] for k in range(curr_K): # Find the data that corresponds to the k-th cluster D = self.get_clustered_data(self.X, labels, k) if len(D) == 0 or D.shape[0] == 1: continue # Whiten and find whitened mean stdD = np.std(D, axis=0) #D = vq.whiten(D) D /= float(stdD) # Same as line above mean = D.mean(axis=0) # Cluster this subspace by half (K=2) half_means, half_labels = self.run_kmeans(D, K=2) # Compute BICs bic1 = self.compute_bic(D, [mean], K=1, labels=np.zeros(D.shape[0]), R=D.shape[0]) bic2 = self.compute_bic(D, half_means, K=2, labels=half_labels, R=D.shape[0]) # Split or not max_bic = np.max([np.abs(bic1), np.abs(bic2)]) norm_bic1 = bic1 / float(max_bic) norm_bic2 = bic2 / float(max_bic) diff_bic = np.abs(norm_bic1 - norm_bic2) # Split! #print "diff_bic", diff_bic if diff_bic > th: final_means.append(half_means[0] * stdD) final_means.append(half_means[1] * stdD) curr_K += 1 stop = False # Don't split else: final_means.append(mean * stdD) final_means = np.asarray(final_means) #print "Estimated K: ", curr_K if self.plot: plt.scatter(self.X[:, 0], self.X[:, 1]) plt.scatter(final_means[:, 0], final_means[:, 1], color="y") plt.show() if curr_K >= maxK or self.X.shape[-1] != final_means.shape[-1]: stop = True else: labels, dist = vq.vq(self.X, final_means) return curr_K
[ "def", "estimate_K_xmeans", "(", "self", ",", "th", "=", "0.2", ",", "maxK", "=", "10", ")", ":", "# Run initial K-means", "means", ",", "labels", "=", "self", ".", "run_kmeans", "(", "self", ".", "X", ",", "self", ".", "init_K", ")", "# Run X-means algorithm", "stop", "=", "False", "curr_K", "=", "self", ".", "init_K", "while", "not", "stop", ":", "stop", "=", "True", "final_means", "=", "[", "]", "for", "k", "in", "range", "(", "curr_K", ")", ":", "# Find the data that corresponds to the k-th cluster", "D", "=", "self", ".", "get_clustered_data", "(", "self", ".", "X", ",", "labels", ",", "k", ")", "if", "len", "(", "D", ")", "==", "0", "or", "D", ".", "shape", "[", "0", "]", "==", "1", ":", "continue", "# Whiten and find whitened mean", "stdD", "=", "np", ".", "std", "(", "D", ",", "axis", "=", "0", ")", "#D = vq.whiten(D)", "D", "/=", "float", "(", "stdD", ")", "# Same as line above", "mean", "=", "D", ".", "mean", "(", "axis", "=", "0", ")", "# Cluster this subspace by half (K=2)", "half_means", ",", "half_labels", "=", "self", ".", "run_kmeans", "(", "D", ",", "K", "=", "2", ")", "# Compute BICs", "bic1", "=", "self", ".", "compute_bic", "(", "D", ",", "[", "mean", "]", ",", "K", "=", "1", ",", "labels", "=", "np", ".", "zeros", "(", "D", ".", "shape", "[", "0", "]", ")", ",", "R", "=", "D", ".", "shape", "[", "0", "]", ")", "bic2", "=", "self", ".", "compute_bic", "(", "D", ",", "half_means", ",", "K", "=", "2", ",", "labels", "=", "half_labels", ",", "R", "=", "D", ".", "shape", "[", "0", "]", ")", "# Split or not", "max_bic", "=", "np", ".", "max", "(", "[", "np", ".", "abs", "(", "bic1", ")", ",", "np", ".", "abs", "(", "bic2", ")", "]", ")", "norm_bic1", "=", "bic1", "/", "float", "(", "max_bic", ")", "norm_bic2", "=", "bic2", "/", "float", "(", "max_bic", ")", "diff_bic", "=", "np", ".", "abs", "(", "norm_bic1", "-", "norm_bic2", ")", "# Split!", "#print \"diff_bic\", diff_bic", "if", "diff_bic", ">", "th", ":", "final_means", ".", "append", "(", "half_means", "[", "0", "]", "*", "stdD", ")", "final_means", ".", "append", "(", "half_means", "[", "1", "]", "*", "stdD", ")", "curr_K", "+=", "1", "stop", "=", "False", "# Don't split", "else", ":", "final_means", ".", "append", "(", "mean", "*", "stdD", ")", "final_means", "=", "np", ".", "asarray", "(", "final_means", ")", "#print \"Estimated K: \", curr_K", "if", "self", ".", "plot", ":", "plt", ".", "scatter", "(", "self", ".", "X", "[", ":", ",", "0", "]", ",", "self", ".", "X", "[", ":", ",", "1", "]", ")", "plt", ".", "scatter", "(", "final_means", "[", ":", ",", "0", "]", ",", "final_means", "[", ":", ",", "1", "]", ",", "color", "=", "\"y\"", ")", "plt", ".", "show", "(", ")", "if", "curr_K", ">=", "maxK", "or", "self", ".", "X", ".", "shape", "[", "-", "1", "]", "!=", "final_means", ".", "shape", "[", "-", "1", "]", ":", "stop", "=", "True", "else", ":", "labels", ",", "dist", "=", "vq", ".", "vq", "(", "self", ".", "X", ",", "final_means", ")", "return", "curr_K" ]
Estimates K running X-means algorithm (Pelleg & Moore, 2000).
[ "Estimates", "K", "running", "X", "-", "means", "algorithm", "(", "Pelleg", "&", "Moore", "2000", ")", "." ]
python
test
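The split decision inside estimate_K_xmeans above boils down to comparing the one-cluster and two-cluster BICs after normalising by the larger magnitude; this standalone sketch reproduces just that test with made-up BIC values.

import numpy as np

def should_split(bic1, bic2, th=0.2):
    """Mirror of the X-means split test: normalise both BICs by the larger
    absolute value and split when their normalised difference exceeds th."""
    max_bic = np.max([np.abs(bic1), np.abs(bic2)])
    diff = np.abs(bic1 / float(max_bic) - bic2 / float(max_bic))
    return diff > th

print(should_split(-480.0, -310.0))   # True  -> two clusters fit notably better, so split
print(should_split(-480.0, -455.0))   # False -> difference under the threshold, keep one cluster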
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L2028-L2052
def dafrda(handle, begin, end): """ Read the double precision data bounded by two addresses within a DAF. Deprecated: This routine has been superseded by :func:`dafgda` and :func:`dafgsr`. This routine is supported for purposes of backward compatibility only. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafrda_c.html :param handle: Handle of a DAF. :type handle: int :param begin: Initial address within file. :type begin: int :param end: Final address within file. :type end: int :return: Data contained between begin and end. :rtype: Array of floats """ handle = ctypes.c_int(handle) begin = ctypes.c_int(begin) end = ctypes.c_int(end) data = stypes.emptyDoubleVector(1 + end.value - begin.value) libspice.dafrda_c(handle, begin, end, data) return stypes.cVectorToPython(data)
[ "def", "dafrda", "(", "handle", ",", "begin", ",", "end", ")", ":", "handle", "=", "ctypes", ".", "c_int", "(", "handle", ")", "begin", "=", "ctypes", ".", "c_int", "(", "begin", ")", "end", "=", "ctypes", ".", "c_int", "(", "end", ")", "data", "=", "stypes", ".", "emptyDoubleVector", "(", "1", "+", "end", ".", "value", "-", "begin", ".", "value", ")", "libspice", ".", "dafrda_c", "(", "handle", ",", "begin", ",", "end", ",", "data", ")", "return", "stypes", ".", "cVectorToPython", "(", "data", ")" ]
Read the double precision data bounded by two addresses within a DAF. Deprecated: This routine has been superseded by :func:`dafgda` and :func:`dafgsr`. This routine is supported for purposes of backward compatibility only. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafrda_c.html :param handle: Handle of a DAF. :type handle: int :param begin: Initial address within file. :type begin: int :param end: Final address within file. :type end: int :return: Data contained between begin and end. :rtype: Array of floats
[ "Read", "the", "double", "precision", "data", "bounded", "by", "two", "addresses", "within", "a", "DAF", "." ]
python
train
lingthio/Flask-User
flask_user/db_adapters/dynamo_db_adapter.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/db_adapters/dynamo_db_adapter.py#L39-L50
def get_object(self, ObjectClass, id): """ Retrieve object of type ``ObjectClass`` by ``id``. | Returns object on success. | Returns None otherwise. """ print('dynamo.get(%s, %s)' % (ObjectClass, str(id))) resp = self.db.engine.get(ObjectClass, [id]) if resp: return resp[0] else: return None
[ "def", "get_object", "(", "self", ",", "ObjectClass", ",", "id", ")", ":", "print", "(", "'dynamo.get(%s, %s)'", "%", "(", "ObjectClass", ",", "str", "(", "id", ")", ")", ")", "resp", "=", "self", ".", "db", ".", "engine", ".", "get", "(", "ObjectClass", ",", "[", "id", "]", ")", "if", "resp", ":", "return", "resp", "[", "0", "]", "else", ":", "return", "None" ]
Retrieve object of type ``ObjectClass`` by ``id``. | Returns object on success. | Returns None otherwise.
[ "Retrieve", "object", "of", "type", "ObjectClass", "by", "id", "." ]
python
train
RedFantom/ttkwidgets
ttkwidgets/color/functions.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/color/functions.py#L57-L60
def rgb_to_hsv(r, g, b): """Convert RGB color to HSV.""" h, s, v = colorsys.rgb_to_hsv(r / 255., g / 255., b / 255.) return round2(h * 360), round2(s * 100), round2(v * 100)
[ "def", "rgb_to_hsv", "(", "r", ",", "g", ",", "b", ")", ":", "h", ",", "s", ",", "v", "=", "colorsys", ".", "rgb_to_hsv", "(", "r", "/", "255.", ",", "g", "/", "255.", ",", "b", "/", "255.", ")", "return", "round2", "(", "h", "*", "360", ")", ",", "round2", "(", "s", "*", "100", ")", ",", "round2", "(", "v", "*", "100", ")" ]
Convert RGB color to HSV.
[ "Convert", "RGB", "color", "to", "HSV", "." ]
python
train
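A quick check of the conversion above; the printed values assume round2 rounds to two decimal places, which is how the helper appears to be used in the module, and the import path is taken from the record's path field.

from ttkwidgets.color.functions import rgb_to_hsv   # path assumed from ttkwidgets/color/functions.py

print(rgb_to_hsv(255, 0, 0))      # (0.0, 100.0, 100.0)   pure red
print(rgb_to_hsv(0, 255, 0))      # (120.0, 100.0, 100.0) pure green
print(rgb_to_hsv(128, 128, 128))  # (0.0, 0.0, 50.2)      mid grey (value ~ 128/255 * 100)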
globality-corp/microcosm-flask
microcosm_flask/conventions/relation.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/relation.py#L252-L258
def configure_relation(graph, ns, mappings): """ Register relation endpoint(s) between two resources. """ convention = RelationConvention(graph) convention.configure(ns, mappings)
[ "def", "configure_relation", "(", "graph", ",", "ns", ",", "mappings", ")", ":", "convention", "=", "RelationConvention", "(", "graph", ")", "convention", ".", "configure", "(", "ns", ",", "mappings", ")" ]
Register relation endpoint(s) between two resources.
[ "Register", "relation", "endpoint", "(", "s", ")", "between", "two", "resources", "." ]
python
train
pudo/jsongraph
jsongraph/query.py
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/query.py#L132-L152
def query(self, parents=None): """ Compose the query and generate SPARQL. """ # TODO: benchmark single-query strategy q = Select([]) q = self.project(q, parent=True) q = self.filter(q, parents=parents) if self.parent is None: subq = Select([self.var]) subq = self.filter(subq, parents=parents) subq = subq.offset(self.node.offset) subq = subq.limit(self.node.limit) subq = subq.distinct() # TODO: sorting. subq = subq.order_by(desc(self.var)) q = q.where(subq) # if hasattr(self.context, 'identifier'): # q._where = graph(self.context.identifier, q._where) log.debug("Compiled query: %r", q.compile()) return q
[ "def", "query", "(", "self", ",", "parents", "=", "None", ")", ":", "# TODO: benchmark single-query strategy", "q", "=", "Select", "(", "[", "]", ")", "q", "=", "self", ".", "project", "(", "q", ",", "parent", "=", "True", ")", "q", "=", "self", ".", "filter", "(", "q", ",", "parents", "=", "parents", ")", "if", "self", ".", "parent", "is", "None", ":", "subq", "=", "Select", "(", "[", "self", ".", "var", "]", ")", "subq", "=", "self", ".", "filter", "(", "subq", ",", "parents", "=", "parents", ")", "subq", "=", "subq", ".", "offset", "(", "self", ".", "node", ".", "offset", ")", "subq", "=", "subq", ".", "limit", "(", "self", ".", "node", ".", "limit", ")", "subq", "=", "subq", ".", "distinct", "(", ")", "# TODO: sorting.", "subq", "=", "subq", ".", "order_by", "(", "desc", "(", "self", ".", "var", ")", ")", "q", "=", "q", ".", "where", "(", "subq", ")", "# if hasattr(self.context, 'identifier'):", "# q._where = graph(self.context.identifier, q._where)", "log", ".", "debug", "(", "\"Compiled query: %r\"", ",", "q", ".", "compile", "(", ")", ")", "return", "q" ]
Compose the query and generate SPARQL.
[ "Compose", "the", "query", "and", "generate", "SPARQL", "." ]
python
train
TankerHQ/python-cli-ui
cli_ui/__init__.py
https://github.com/TankerHQ/python-cli-ui/blob/4c9928827cea06cf80e6a1f5bd86478d8566863f/cli_ui/__init__.py#L538-L554
def ask_yes_no(*question: Token, default: bool = False) -> bool: """Ask the user to answer by yes or no""" while True: tokens = [green, "::", reset] + list(question) + [reset] if default: tokens.append("(Y/n)") else: tokens.append("(y/N)") info(*tokens) answer = read_input() if answer.lower() in ["y", "yes"]: return True if answer.lower() in ["n", "no"]: return False if not answer: return default warning("Please answer by 'y' (yes) or 'n' (no) ")
[ "def", "ask_yes_no", "(", "*", "question", ":", "Token", ",", "default", ":", "bool", "=", "False", ")", "->", "bool", ":", "while", "True", ":", "tokens", "=", "[", "green", ",", "\"::\"", ",", "reset", "]", "+", "list", "(", "question", ")", "+", "[", "reset", "]", "if", "default", ":", "tokens", ".", "append", "(", "\"(Y/n)\"", ")", "else", ":", "tokens", ".", "append", "(", "\"(y/N)\"", ")", "info", "(", "*", "tokens", ")", "answer", "=", "read_input", "(", ")", "if", "answer", ".", "lower", "(", ")", "in", "[", "\"y\"", ",", "\"yes\"", "]", ":", "return", "True", "if", "answer", ".", "lower", "(", ")", "in", "[", "\"n\"", ",", "\"no\"", "]", ":", "return", "False", "if", "not", "answer", ":", "return", "default", "warning", "(", "\"Please answer by 'y' (yes) or 'n' (no) \"", ")" ]
Ask the user to answer by yes or no
[ "Ask", "the", "user", "to", "answer", "by", "yes", "or", "no" ]
python
train
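A usage sketch for ask_yes_no above; the record's path shows it lives in cli_ui/__init__.py, so it is addressable at package level, and the default controls what an empty answer means.

import cli_ui

# Pressing Enter without typing returns the default (False here).
if cli_ui.ask_yes_no("Deploy", "to production?", default=False):
    cli_ui.info("Deploying...")
else:
    cli_ui.warning("Aborted by user")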
chrippa/ds4drv
ds4drv/actions/binding.py
https://github.com/chrippa/ds4drv/blob/be7327fc3f5abb8717815f2a1a2ad3d335535d8a/ds4drv/actions/binding.py#L120-L130
def exec_background(controller, cmd, *args): """Executes a subprocess in the background.""" controller.logger.info("Executing in the background: {0} {1}", cmd, " ".join(args)) try: subprocess.Popen([cmd] + list(args), stdout=open(os.devnull, "wb"), stderr=open(os.devnull, "wb")) except OSError as err: controller.logger.error("Failed to execute process: {0}", err)
[ "def", "exec_background", "(", "controller", ",", "cmd", ",", "*", "args", ")", ":", "controller", ".", "logger", ".", "info", "(", "\"Executing in the background: {0} {1}\"", ",", "cmd", ",", "\" \"", ".", "join", "(", "args", ")", ")", "try", ":", "subprocess", ".", "Popen", "(", "[", "cmd", "]", "+", "list", "(", "args", ")", ",", "stdout", "=", "open", "(", "os", ".", "devnull", ",", "\"wb\"", ")", ",", "stderr", "=", "open", "(", "os", ".", "devnull", ",", "\"wb\"", ")", ")", "except", "OSError", "as", "err", ":", "controller", ".", "logger", ".", "error", "(", "\"Failed to execute process: {0}\"", ",", "err", ")" ]
Executes a subprocess in the background.
[ "Executes", "a", "subprocess", "in", "the", "background", "." ]
python
train
jonbretman/jinja-to-js
jinja_to_js/__init__.py
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L251-L268
def get_output(self):
    """
    Returns the generated JavaScript code.

    Returns:
        str
    """

    # generate the JS function string
    template_function = TEMPLATE_WRAPPER.format(
        function_name=self.js_function_name,
        template_code=self.output.getvalue()
    ).strip()

    # get the correct module format template
    module_format = JS_MODULE_FORMATS[self.js_module_format]

    # generate the module code
    return module_format(self.dependencies, template_function)
[ "def", "get_output", "(", "self", ")", ":", "# generate the JS function string", "template_function", "=", "TEMPLATE_WRAPPER", ".", "format", "(", "function_name", "=", "self", ".", "js_function_name", ",", "template_code", "=", "self", ".", "output", ".", "getvalue", "(", ")", ")", ".", "strip", "(", ")", "# get the correct module format template", "module_format", "=", "JS_MODULE_FORMATS", "[", "self", ".", "js_module_format", "]", "# generate the module code", "return", "module_format", "(", "self", ".", "dependencies", ",", "template_function", ")" ]
Returns the generated JavaScript code. Returns: str
[ "Returns", "the", "generated", "JavaScript", "code", "." ]
python
train
mozilla-services/amo2kinto
amo2kinto/exporter.py
https://github.com/mozilla-services/amo2kinto/blob/1ec40647e77cf89badbea4a58d328243daed49a9/amo2kinto/exporter.py#L326-L352
def write_cert_items(xml_tree, records, api_ver=3, app_id=None, app_ver=None):
    """Generate the certificate blocklists.

    <certItem issuerName="MIGQMQswCQYD...IENB">
      <serialNumber>UoRGnb96CUDTxIqVry6LBg==</serialNumber>
    </certItem>

    or

    <certItem subject='MCIxIDAeBgNVBAMMF0Fub3RoZXIgVGVzdCBFbmQtZW50aXR5'
              pubKeyHash='VCIlmPM9NkgFQtrs4Oa5TeFcDu6MWRTKSNdePEhOgD8='>
    </certItem>
    """
    if not records or not should_include_certs(app_id, app_ver):
        return

    certItems = etree.SubElement(xml_tree, 'certItems')

    for item in records:
        if item.get('subject') and item.get('pubKeyHash'):
            cert = etree.SubElement(certItems, 'certItem',
                                    subject=item['subject'],
                                    pubKeyHash=item['pubKeyHash'])
        else:
            cert = etree.SubElement(certItems, 'certItem',
                                    issuerName=item['issuerName'])
        serialNumber = etree.SubElement(cert, 'serialNumber')
        serialNumber.text = item['serialNumber']
[ "def", "write_cert_items", "(", "xml_tree", ",", "records", ",", "api_ver", "=", "3", ",", "app_id", "=", "None", ",", "app_ver", "=", "None", ")", ":", "if", "not", "records", "or", "not", "should_include_certs", "(", "app_id", ",", "app_ver", ")", ":", "return", "certItems", "=", "etree", ".", "SubElement", "(", "xml_tree", ",", "'certItems'", ")", "for", "item", "in", "records", ":", "if", "item", ".", "get", "(", "'subject'", ")", "and", "item", ".", "get", "(", "'pubKeyHash'", ")", ":", "cert", "=", "etree", ".", "SubElement", "(", "certItems", ",", "'certItem'", ",", "subject", "=", "item", "[", "'subject'", "]", ",", "pubKeyHash", "=", "item", "[", "'pubKeyHash'", "]", ")", "else", ":", "cert", "=", "etree", ".", "SubElement", "(", "certItems", ",", "'certItem'", ",", "issuerName", "=", "item", "[", "'issuerName'", "]", ")", "serialNumber", "=", "etree", ".", "SubElement", "(", "cert", ",", "'serialNumber'", ")", "serialNumber", ".", "text", "=", "item", "[", "'serialNumber'", "]" ]
Generate the certificate blocklists. <certItem issuerName="MIGQMQswCQYD...IENB"> <serialNumber>UoRGnb96CUDTxIqVry6LBg==</serialNumber> </certItem> or <certItem subject='MCIxIDAeBgNVBAMMF0Fub3RoZXIgVGVzdCBFbmQtZW50aXR5' pubKeyHash='VCIlmPM9NkgFQtrs4Oa5TeFcDu6MWRTKSNdePEhOgD8='> </certItem>
[ "Generate", "the", "certificate", "blocklists", "." ]
python
train
RRZE-HPC/kerncraft
kerncraft/models/ecm.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/ecm.py#L339-L357
def conv_cy(self, cy_cl):
    """Convert cycles (cy/CL) to other units, such as FLOP/s or It/s."""
    if not isinstance(cy_cl, PrefixedUnit):
        cy_cl = PrefixedUnit(cy_cl, '', 'cy/CL')
    clock = self.machine['clock']
    element_size = self.kernel.datatypes_size[self.kernel.datatype]
    elements_per_cacheline = int(self.machine['cacheline size']) // element_size
    it_s = clock/cy_cl*elements_per_cacheline
    it_s.unit = 'It/s'
    flops_per_it = sum(self.kernel._flops.values())
    performance = it_s*flops_per_it
    performance.unit = 'FLOP/s'
    cy_it = cy_cl*elements_per_cacheline
    cy_it.unit = 'cy/It'

    return {'It/s': it_s,
            'cy/CL': cy_cl,
            'cy/It': cy_it,
            'FLOP/s': performance}
[ "def", "conv_cy", "(", "self", ",", "cy_cl", ")", ":", "if", "not", "isinstance", "(", "cy_cl", ",", "PrefixedUnit", ")", ":", "cy_cl", "=", "PrefixedUnit", "(", "cy_cl", ",", "''", ",", "'cy/CL'", ")", "clock", "=", "self", ".", "machine", "[", "'clock'", "]", "element_size", "=", "self", ".", "kernel", ".", "datatypes_size", "[", "self", ".", "kernel", ".", "datatype", "]", "elements_per_cacheline", "=", "int", "(", "self", ".", "machine", "[", "'cacheline size'", "]", ")", "//", "element_size", "it_s", "=", "clock", "/", "cy_cl", "*", "elements_per_cacheline", "it_s", ".", "unit", "=", "'It/s'", "flops_per_it", "=", "sum", "(", "self", ".", "kernel", ".", "_flops", ".", "values", "(", ")", ")", "performance", "=", "it_s", "*", "flops_per_it", "performance", ".", "unit", "=", "'FLOP/s'", "cy_it", "=", "cy_cl", "*", "elements_per_cacheline", "cy_it", ".", "unit", "=", "'cy/It'", "return", "{", "'It/s'", ":", "it_s", ",", "'cy/CL'", ":", "cy_cl", ",", "'cy/It'", ":", "cy_it", ",", "'FLOP/s'", ":", "performance", "}" ]
Convert cycles (cy/CL) to other units, such as FLOP/s or It/s.
[ "Convert", "cycles", "(", "cy", "/", "CL", ")", "to", "other", "units", "such", "as", "FLOP", "/", "s", "or", "It", "/", "s", "." ]
python
test
IdentityPython/SATOSA
src/satosa/proxy_server.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/proxy_server.py#L31-L45
def unpack_post(environ, content_length):
    """
    Unpacks a post request query string.
    :param environ: whiskey application environment.
    :return: A dictionary with parameters.
    """
    post_body = environ['wsgi.input'].read(content_length).decode("utf-8")
    data = None
    if "application/x-www-form-urlencoded" in environ["CONTENT_TYPE"]:
        data = dict(parse_qsl(post_body))
    elif "application/json" in environ["CONTENT_TYPE"]:
        data = json.loads(post_body)

    logger.debug("unpack_post:: %s", data)
    return data
[ "def", "unpack_post", "(", "environ", ",", "content_length", ")", ":", "post_body", "=", "environ", "[", "'wsgi.input'", "]", ".", "read", "(", "content_length", ")", ".", "decode", "(", "\"utf-8\"", ")", "data", "=", "None", "if", "\"application/x-www-form-urlencoded\"", "in", "environ", "[", "\"CONTENT_TYPE\"", "]", ":", "data", "=", "dict", "(", "parse_qsl", "(", "post_body", ")", ")", "elif", "\"application/json\"", "in", "environ", "[", "\"CONTENT_TYPE\"", "]", ":", "data", "=", "json", ".", "loads", "(", "post_body", ")", "logger", ".", "debug", "(", "\"unpack_post:: %s\"", ",", "data", ")", "return", "data" ]
Unpacks a post request query string. :param environ: whiskey application environment. :return: A dictionary with parameters.
[ "Unpacks", "a", "post", "request", "query", "string", ".", ":", "param", "environ", ":", "whiskey", "application", "environment", ".", ":", "return", ":", "A", "dictionary", "with", "parameters", "." ]
python
train
jpscaletti/solution
solution/fields/file/image.py
https://github.com/jpscaletti/solution/blob/eabafd8e695bbb0209242e002dbcc05ffb327f43/solution/fields/file/image.py#L22-L30
def clean(self, value):
    """Passes the value to FileField and resizes the image at the path
    the parent returns if needed.
    """
    path = super(Image, self).clean(value)
    if path and self.size:
        self.resize_image(join(self.base_path, path))
    return path
[ "def", "clean", "(", "self", ",", "value", ")", ":", "path", "=", "super", "(", "Image", ",", "self", ")", ".", "clean", "(", "value", ")", "if", "path", "and", "self", ".", "size", ":", "self", ".", "resize_image", "(", "join", "(", "self", ".", "base_path", ",", "path", ")", ")", "return", "path" ]
Passes the value to FileField and resizes the image at the path the parent returns if needed.
[ "Passes", "the", "value", "to", "FileField", "and", "resizes", "the", "image", "at", "the", "path", "the", "parent", "returns", "if", "needed", "." ]
python
train
jorgenschaefer/elpy
elpy/rpc.py
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/rpc.py#L59-L68
def read_json(self):
    """Read a single line and decode it as JSON.

    Can raise an EOFError() when the input source was closed.
    """
    line = self.stdin.readline()
    if line == '':
        raise EOFError()
    return json.loads(line)
[ "def", "read_json", "(", "self", ")", ":", "line", "=", "self", ".", "stdin", ".", "readline", "(", ")", "if", "line", "==", "''", ":", "raise", "EOFError", "(", ")", "return", "json", ".", "loads", "(", "line", ")" ]
Read a single line and decode it as JSON. Can raise an EOFError() when the input source was closed.
[ "Read", "a", "single", "line", "and", "decode", "it", "as", "JSON", "." ]
python
train
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L570-L602
def findfivo(ol,*args,**kwargs):
    '''
        #findfivo
        f,i,v,o四元决定
        fivo-4-tuple-engine
        #cond_func diff_func(index,value,*diff_args)
    '''
    args = list(args)
    lngth = args.__len__()
    if(lngth==0):
        diff_funcs_arr = kwargs['cond_funcs']
        diff_args_arr = kwargs['cond_func_args_array']
    elif(lngth==1):
        if('cond_func_args_array' in kwargs):
            diff_funcs_arr = args[0]
            diff_args_arr = kwargs['cond_func_args_array']
        else:
            diff_funcs_arr = kwargs['cond_funcs']
            diff_args_arr = args[0]
    else:
        diff_funcs_arr = args[0]
        diff_args_arr = args[1]
    lngth = ol.__len__()
    rslt = []
    for i in range(0,lngth):
        index = i
        value = ol[i]
        func = diff_funcs_arr[i]
        args = diff_args_arr[i]
        cond = func(index,value,*args)
        if(cond):
            rslt.append((index,value))
        else:
            pass
    return(rslt)
[ "def", "findfivo", "(", "ol", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "list", "(", "args", ")", "lngth", "=", "args", ".", "__len__", "(", ")", "if", "(", "lngth", "==", "0", ")", ":", "diff_funcs_arr", "=", "kwargs", "[", "'cond_funcs'", "]", "diff_args_arr", "=", "kwargs", "[", "'cond_func_args_array'", "]", "elif", "(", "lngth", "==", "1", ")", ":", "if", "(", "'cond_func_args_array'", "in", "kwargs", ")", ":", "diff_funcs_arr", "=", "args", "[", "0", "]", "diff_args_arr", "=", "kwargs", "[", "'cond_func_args_array'", "]", "else", ":", "diff_funcs_arr", "=", "kwargs", "[", "'cond_funcs'", "]", "diff_args_arr", "=", "args", "[", "0", "]", "else", ":", "diff_funcs_arr", "=", "args", "[", "0", "]", "diff_args_arr", "=", "args", "[", "1", "]", "lngth", "=", "ol", ".", "__len__", "(", ")", "rslt", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "lngth", ")", ":", "index", "=", "i", "value", "=", "ol", "[", "i", "]", "func", "=", "diff_funcs_arr", "[", "i", "]", "args", "=", "diff_args_arr", "[", "i", "]", "cond", "=", "func", "(", "index", ",", "value", ",", "*", "args", ")", "if", "(", "cond", ")", ":", "rslt", ".", "append", "(", "(", "index", ",", "value", ")", ")", "else", ":", "pass", "return", "(", "rslt", ")" ]
#findfivo f,i,v,o四元决定 fivo-4-tuple-engine #cond_func diff_func(index,value,*diff_args)
[ "#findfivo", "f", "i", "v", "o四元决定", "fivo", "-", "4", "-", "tuple", "-", "engine", "#cond_func", "diff_func", "(", "index", "value", "*", "diff_args", ")" ]
python
valid
datastax/python-driver
cassandra/cqlengine/query.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L1507-L1518
def delete(self):
    """ Deletes one instance """
    if self.instance is None:
        raise CQLEngineException("DML Query instance attribute is None")

    ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp,
                         conditionals=self._conditional, if_exists=self._if_exists)
    for name, col in self.model._primary_keys.items():
        val = getattr(self.instance, name)
        if val is None and not col.partition_key:
            continue
        ds.add_where(col, EqualsOperator(), val)
    self._execute(ds)
[ "def", "delete", "(", "self", ")", ":", "if", "self", ".", "instance", "is", "None", ":", "raise", "CQLEngineException", "(", "\"DML Query instance attribute is None\"", ")", "ds", "=", "DeleteStatement", "(", "self", ".", "column_family_name", ",", "timestamp", "=", "self", ".", "_timestamp", ",", "conditionals", "=", "self", ".", "_conditional", ",", "if_exists", "=", "self", ".", "_if_exists", ")", "for", "name", ",", "col", "in", "self", ".", "model", ".", "_primary_keys", ".", "items", "(", ")", ":", "val", "=", "getattr", "(", "self", ".", "instance", ",", "name", ")", "if", "val", "is", "None", "and", "not", "col", ".", "partition_key", ":", "continue", "ds", ".", "add_where", "(", "col", ",", "EqualsOperator", "(", ")", ",", "val", ")", "self", ".", "_execute", "(", "ds", ")" ]
Deletes one instance
[ "Deletes", "one", "instance" ]
python
train
alkivi-sas/python-alkivi-logger
alkivi/logger/logger.py
https://github.com/alkivi-sas/python-alkivi-logger/blob/e96d5a987a5c8789c51d4fa7541709e05b1f51e1/alkivi/logger/logger.py#L227-L234
def _get_handler(self, handler_class):
    """Return an existing class of handler."""
    element = None
    for handler in self.handlers:
        if isinstance(handler, handler_class):
            element = handler
            break
    return element
[ "def", "_get_handler", "(", "self", ",", "handler_class", ")", ":", "element", "=", "None", "for", "handler", "in", "self", ".", "handlers", ":", "if", "isinstance", "(", "handler", ",", "handler_class", ")", ":", "element", "=", "handler", "break", "return", "element" ]
Return an existing class of handler.
[ "Return", "an", "existing", "class", "of", "handler", "." ]
python
train
sassoo/goldman
goldman/queryparams/page.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/page.py#L78-L87
def last(self):
    """ Generate query parameters for the last page """

    if self.limit > self.total:
        return None
    elif self.offset >= self.total:
        return None
    else:
        offset = (self.total / self.limit) * self.limit
        return {'page[offset]': offset, 'page[limit]': self.limit}
[ "def", "last", "(", "self", ")", ":", "if", "self", ".", "limit", ">", "self", ".", "total", ":", "return", "None", "elif", "self", ".", "offset", ">=", "self", ".", "total", ":", "return", "None", "else", ":", "offset", "=", "(", "self", ".", "total", "/", "self", ".", "limit", ")", "*", "self", ".", "limit", "return", "{", "'page[offset]'", ":", "offset", ",", "'page[limit]'", ":", "self", ".", "limit", "}" ]
Generate query parameters for the last page
[ "Generate", "query", "parameters", "for", "the", "last", "page" ]
python
train
mabuchilab/QNET
src/qnet/algebra/core/abstract_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/abstract_algebra.py#L629-L639
def free_symbols(self):
    """Set of free SymPy symbols contained within the expression."""
    if self._free_symbols is None:
        res = set.union(
            set([]),  # dummy arg (union fails without arguments)
            *[_free_symbols(val) for val in self.kwargs.values()])
        res.update(
            set([]),  # dummy arg (update fails without arguments)
            *[_free_symbols(arg) for arg in self.args])
        self._free_symbols = res
    return self._free_symbols
[ "def", "free_symbols", "(", "self", ")", ":", "if", "self", ".", "_free_symbols", "is", "None", ":", "res", "=", "set", ".", "union", "(", "set", "(", "[", "]", ")", ",", "# dummy arg (union fails without arguments)", "*", "[", "_free_symbols", "(", "val", ")", "for", "val", "in", "self", ".", "kwargs", ".", "values", "(", ")", "]", ")", "res", ".", "update", "(", "set", "(", "[", "]", ")", ",", "# dummy arg (update fails without arguments)", "*", "[", "_free_symbols", "(", "arg", ")", "for", "arg", "in", "self", ".", "args", "]", ")", "self", ".", "_free_symbols", "=", "res", "return", "self", ".", "_free_symbols" ]
Set of free SymPy symbols contained within the expression.
[ "Set", "of", "free", "SymPy", "symbols", "contained", "within", "the", "expression", "." ]
python
train
tylerbutler/engineer
engineer/devtools/theme_tools.py
https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/devtools/theme_tools.py#L47-L65
def list_theme():
    """List all available Engineer themes."""
    from engineer.themes import ThemeManager

    themes = ThemeManager.themes()
    col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()]))

    themes = ThemeManager.themes_by_finder()
    for finder in sorted(themes.iterkeys()):
        if len(themes[finder]) > 0:
            puts("%s: " % finder)
            for theme in sorted(themes[finder], key=lambda _: _.id):
                with indent(4):
                    puts(
                        columns(
                            [colored.cyan("%s:" % theme.id), col1],
                            [colored.white(theme.root_path, bold=True), col2]
                        )
                    )
[ "def", "list_theme", "(", ")", ":", "from", "engineer", ".", "themes", "import", "ThemeManager", "themes", "=", "ThemeManager", ".", "themes", "(", ")", "col1", ",", "col2", "=", "map", "(", "max", ",", "zip", "(", "*", "[", "(", "len", "(", "t", ".", "id", ")", "+", "2", ",", "len", "(", "t", ".", "root_path", ")", "+", "2", ")", "for", "t", "in", "themes", ".", "itervalues", "(", ")", "]", ")", ")", "themes", "=", "ThemeManager", ".", "themes_by_finder", "(", ")", "for", "finder", "in", "sorted", "(", "themes", ".", "iterkeys", "(", ")", ")", ":", "if", "len", "(", "themes", "[", "finder", "]", ")", ">", "0", ":", "puts", "(", "\"%s: \"", "%", "finder", ")", "for", "theme", "in", "sorted", "(", "themes", "[", "finder", "]", ",", "key", "=", "lambda", "_", ":", "_", ".", "id", ")", ":", "with", "indent", "(", "4", ")", ":", "puts", "(", "columns", "(", "[", "colored", ".", "cyan", "(", "\"%s:\"", "%", "theme", ".", "id", ")", ",", "col1", "]", ",", "[", "colored", ".", "white", "(", "theme", ".", "root_path", ",", "bold", "=", "True", ")", ",", "col2", "]", ")", ")" ]
List all available Engineer themes.
[ "List", "all", "available", "Engineer", "themes", "." ]
python
train
erigones/zabbix-api
zabbix_api.py
https://github.com/erigones/zabbix-api/blob/2474ab1d1ddb46c26eea70671b3a599b836d42da/zabbix_api.py#L333-L341
def relogin(self):
    """Perform a re-login"""
    try:
        self.__auth = None  # reset auth before relogin
        self.login()
    except ZabbixAPIException as e:
        self.log(ERROR, 'Zabbix API relogin error (%s)', e)
        self.__auth = None  # logged_in() will always return False
        raise
[ "def", "relogin", "(", "self", ")", ":", "try", ":", "self", ".", "__auth", "=", "None", "# reset auth before relogin", "self", ".", "login", "(", ")", "except", "ZabbixAPIException", "as", "e", ":", "self", ".", "log", "(", "ERROR", ",", "'Zabbix API relogin error (%s)'", ",", "e", ")", "self", ".", "__auth", "=", "None", "# logged_in() will always return False", "raise" ]
Perform a re-login
[ "Perform", "a", "re", "-", "login" ]
python
train
partofthething/ace
ace/supersmoother.py
https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/supersmoother.py#L93-L111
def _enhance_bass(self):
    """Update best span choices with bass enhancement as requested by user (Eq. 11)."""
    if not self._bass_enhancement:  # like in supsmu, skip if alpha=0
        return

    bass_span = DEFAULT_SPANS[BASS_INDEX]
    enhanced_spans = []
    for i, best_span_here in enumerate(self._best_span_at_each_point):
        best_smooth_index = DEFAULT_SPANS.index(best_span_here)
        best_span = DEFAULT_SPANS[best_smooth_index]
        best_span_residual = self._residual_smooths[best_smooth_index][i]
        bass_span_residual = self._residual_smooths[BASS_INDEX][i]
        if 0 < best_span_residual < bass_span_residual:
            ri = best_span_residual / bass_span_residual
            bass_factor = ri ** (10.0 - self._bass_enhancement)
            enhanced_spans.append(best_span + (bass_span - best_span) * bass_factor)
        else:
            enhanced_spans.append(best_span)
    self._best_span_at_each_point = enhanced_spans
[ "def", "_enhance_bass", "(", "self", ")", ":", "if", "not", "self", ".", "_bass_enhancement", ":", "# like in supsmu, skip if alpha=0", "return", "bass_span", "=", "DEFAULT_SPANS", "[", "BASS_INDEX", "]", "enhanced_spans", "=", "[", "]", "for", "i", ",", "best_span_here", "in", "enumerate", "(", "self", ".", "_best_span_at_each_point", ")", ":", "best_smooth_index", "=", "DEFAULT_SPANS", ".", "index", "(", "best_span_here", ")", "best_span", "=", "DEFAULT_SPANS", "[", "best_smooth_index", "]", "best_span_residual", "=", "self", ".", "_residual_smooths", "[", "best_smooth_index", "]", "[", "i", "]", "bass_span_residual", "=", "self", ".", "_residual_smooths", "[", "BASS_INDEX", "]", "[", "i", "]", "if", "0", "<", "best_span_residual", "<", "bass_span_residual", ":", "ri", "=", "best_span_residual", "/", "bass_span_residual", "bass_factor", "=", "ri", "**", "(", "10.0", "-", "self", ".", "_bass_enhancement", ")", "enhanced_spans", ".", "append", "(", "best_span", "+", "(", "bass_span", "-", "best_span", ")", "*", "bass_factor", ")", "else", ":", "enhanced_spans", ".", "append", "(", "best_span", ")", "self", ".", "_best_span_at_each_point", "=", "enhanced_spans" ]
Update best span choices with bass enhancement as requested by user (Eq. 11).
[ "Update", "best", "span", "choices", "with", "bass", "enhancement", "as", "requested", "by", "user", "(", "Eq", ".", "11", ")", "." ]
python
train
oisinmulvihill/stomper
lib/stomper/stomp_10.py
https://github.com/oisinmulvihill/stomper/blob/842ed2353a4ddd638d35929ae5b7b70eb298305c/lib/stomper/stomp_10.py#L493-L514
def receipt(self, msg):
    """Called to handle a receipt message received from the server.

    This method just logs the receipt message

    returned:
        NO_RESPONSE_NEEDED
    """
    body = msg['body'].replace(NULL, '')

    brief_msg = ""
    if 'receipt-id' in msg['headers']:
        brief_msg = msg['headers']['receipt-id']

    self.log.info("Received server receipt message - receipt-id:%s\n\n%s" % (brief_msg, body))

    returned = NO_RESPONSE_NEEDED
    if self.testing:
        returned = 'receipt'

    return returned
[ "def", "receipt", "(", "self", ",", "msg", ")", ":", "body", "=", "msg", "[", "'body'", "]", ".", "replace", "(", "NULL", ",", "''", ")", "brief_msg", "=", "\"\"", "if", "'receipt-id'", "in", "msg", "[", "'headers'", "]", ":", "brief_msg", "=", "msg", "[", "'headers'", "]", "[", "'receipt-id'", "]", "self", ".", "log", ".", "info", "(", "\"Received server receipt message - receipt-id:%s\\n\\n%s\"", "%", "(", "brief_msg", ",", "body", ")", ")", "returned", "=", "NO_RESPONSE_NEEDED", "if", "self", ".", "testing", ":", "returned", "=", "'receipt'", "return", "returned" ]
Called to handle a receipt message received from the server. This method just logs the receipt message returned: NO_RESPONSE_NEEDED
[ "Called", "to", "handle", "a", "receipt", "message", "received", "from", "the", "server", "." ]
python
train
rainwoodman/sharedmem
sharedmem/sharedmem.py
https://github.com/rainwoodman/sharedmem/blob/b23e59c1ed0e28f7b6c96c17a04d55c700e06e3a/sharedmem/sharedmem.py#L812-L823
def copy(a):
    """ Copy an array to the shared memory.

        Notes
        -----
        copy is not always necessary because the private memory is always copy-on-write.

        Use :code:`a = copy(a)` to immediately dereference the old 'a' on private memory
    """
    shared = anonymousmemmap(a.shape, dtype=a.dtype)
    shared[:] = a[:]
    return shared
[ "def", "copy", "(", "a", ")", ":", "shared", "=", "anonymousmemmap", "(", "a", ".", "shape", ",", "dtype", "=", "a", ".", "dtype", ")", "shared", "[", ":", "]", "=", "a", "[", ":", "]", "return", "shared" ]
Copy an array to the shared memory. Notes ----- copy is not always necessary because the private memory is always copy-on-write. Use :code:`a = copy(a)` to immediately dereference the old 'a' on private memory
[ "Copy", "an", "array", "to", "the", "shared", "memory", "." ]
python
valid
luckydonald/pytgbot
pytgbot/api_types/sendable/input_media.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/sendable/input_media.py#L363-L392
def to_array(self):
    """
    Serializes this InputMediaVideo to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    from .files import InputFile

    array = super(InputMediaVideo, self).to_array()
    # 'type' given by superclass
    # 'media' given by superclass
    if self.thumb is not None:
        if isinstance(self.thumb, InputFile):
            array['thumb'] = None  # type InputFile
        elif isinstance(self.thumb, str):
            array['thumb'] = u(self.thumb)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
    # 'caption' given by superclass
    # 'parse_mode' given by superclass
    if self.width is not None:
        array['width'] = int(self.width)  # type int
    if self.height is not None:
        array['height'] = int(self.height)  # type int
    if self.duration is not None:
        array['duration'] = int(self.duration)  # type int
    if self.supports_streaming is not None:
        array['supports_streaming'] = bool(self.supports_streaming)  # type bool
    return array
[ "def", "to_array", "(", "self", ")", ":", "from", ".", "files", "import", "InputFile", "array", "=", "super", "(", "InputMediaVideo", ",", "self", ")", ".", "to_array", "(", ")", "# 'type' given by superclass", "# 'media' given by superclass", "if", "self", ".", "thumb", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "thumb", ",", "InputFile", ")", ":", "array", "[", "'thumb'", "]", "=", "None", "# type InputFile", "elif", "isinstance", "(", "self", ".", "thumb", ",", "str", ")", ":", "array", "[", "'thumb'", "]", "=", "u", "(", "self", ".", "thumb", ")", "# py2: type unicode, py3: type str", "else", ":", "raise", "TypeError", "(", "'Unknown type, must be one of InputFile, str.'", ")", "# end if", "# 'caption' given by superclass", "# 'parse_mode' given by superclass", "if", "self", ".", "width", "is", "not", "None", ":", "array", "[", "'width'", "]", "=", "int", "(", "self", ".", "width", ")", "# type int", "if", "self", ".", "height", "is", "not", "None", ":", "array", "[", "'height'", "]", "=", "int", "(", "self", ".", "height", ")", "# type int", "if", "self", ".", "duration", "is", "not", "None", ":", "array", "[", "'duration'", "]", "=", "int", "(", "self", ".", "duration", ")", "# type int", "if", "self", ".", "supports_streaming", "is", "not", "None", ":", "array", "[", "'supports_streaming'", "]", "=", "bool", "(", "self", ".", "supports_streaming", ")", "# type bool", "return", "array" ]
Serializes this InputMediaVideo to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "InputMediaVideo", "to", "a", "dictionary", "." ]
python
train
waqasbhatti/astrobase
astrobase/checkplot/pkl.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/checkplot/pkl.py#L1527-L1689
def checkplot_pickle_update(
        currentcp,
        updatedcp,
        outfile=None,
        outgzip=False,
        pickleprotocol=None,
        verbose=True
):
    '''This updates the current checkplotdict with updated values provided.

    Parameters
    ----------

    currentcp : dict or str
        This is either a checkplotdict produced by `checkplot_pickle` above or
        a checkplot pickle file produced by the same function. This checkplot
        will be updated from the `updatedcp` checkplot.

    updatedcp : dict or str
        This is either a checkplotdict produced by `checkplot_pickle` above or
        a checkplot pickle file produced by the same function. This checkplot
        will be the source of the update to the `currentcp` checkplot.

    outfile : str or None
        The name of the output checkplot pickle file. The function will output
        the new checkplot gzipped pickle file to `outfile` if outfile is a
        filename. If `currentcp` is a file and `outfile`, this will be set to
        that filename, so the function updates it in place.

    outgzip : bool
        This controls whether to gzip the output pickle. It turns out that
        this is the slowest bit in the output process, so if you're after
        speed, best not to use this. This is False by default since it turns
        out that gzip actually doesn't save that much space (29 MB vs. 35 MB
        for the average checkplot pickle).

    pickleprotocol : int or None
        This sets the pickle file protocol to use when writing the pickle:

        If None, will choose a protocol using the following rules:

        - 4 -> default in Python >= 3.4 - fast but incompatible with Python 2
        - 3 -> default in Python 3.0-3.3 - mildly fast
        - 2 -> default in Python 2 - very slow, but compatible with Python 2/3

        The default protocol kwarg is None, this will make an automatic choice
        for pickle protocol that's best suited for the version of Python in
        use. Note that this will make pickles generated by Py3 incompatible
        with Py2.

    verbose : bool
        If True, will indicate progress and warn about problems.

    Returns
    -------

    str
        The path to the updated checkplot pickle file. If `outfile` was None
        and `currentcp` was a filename, this will return `currentcp` to
        indicate that the checkplot pickle file was updated in place.

    '''

    # break out python 2.7 and > 3 nonsense
    if sys.version_info[:2] > (3,2):

        # generate the outfile filename
        if not outfile and isinstance(currentcp,str):
            plotfpath = currentcp
        elif outfile:
            plotfpath = outfile
        elif isinstance(currentcp, dict) and currentcp['objectid']:
            if outgzip:
                plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
            else:
                plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
        else:
            # we'll get this later below
            plotfpath = None

        if (isinstance(currentcp, str) and os.path.exists(currentcp)):
            cp_current = _read_checkplot_picklefile(currentcp)
        elif isinstance(currentcp, dict):
            cp_current = currentcp
        else:
            LOGERROR('currentcp: %s of type %s is not a '
                     'valid checkplot filename (or does not exist), or a dict' %
                     (os.path.abspath(currentcp), type(currentcp)))
            return None

        if (isinstance(updatedcp, str) and os.path.exists(updatedcp)):
            cp_updated = _read_checkplot_picklefile(updatedcp)
        elif isinstance(updatedcp, dict):
            cp_updated = updatedcp
        else:
            LOGERROR('updatedcp: %s of type %s is not a '
                     'valid checkplot filename (or does not exist), or a dict' %
                     (os.path.abspath(updatedcp), type(updatedcp)))
            return None

    # check for unicode in python 2.7
    else:

        # generate the outfile filename
        if (not outfile and
                (isinstance(currentcp, str) or isinstance(currentcp, unicode))):
            plotfpath = currentcp
        elif outfile:
            plotfpath = outfile
        elif isinstance(currentcp, dict) and currentcp['objectid']:
            if outgzip:
                plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
            else:
                plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
        else:
            # we'll get this later below
            plotfpath = None

        # get the current checkplotdict
        if ((isinstance(currentcp, str) or isinstance(currentcp, unicode)) and
                os.path.exists(currentcp)):
            cp_current = _read_checkplot_picklefile(currentcp)
        elif isinstance(currentcp,dict):
            cp_current = currentcp
        else:
            LOGERROR('currentcp: %s of type %s is not a '
                     'valid checkplot filename (or does not exist), or a dict' %
                     (os.path.abspath(currentcp), type(currentcp)))
            return None

        # get the updated checkplotdict
        if ((isinstance(updatedcp, str) or isinstance(updatedcp, unicode)) and
                os.path.exists(updatedcp)):
            cp_updated = _read_checkplot_picklefile(updatedcp)
        elif isinstance(updatedcp, dict):
            cp_updated = updatedcp
        else:
            LOGERROR('updatedcp: %s of type %s is not a '
                     'valid checkplot filename (or does not exist), or a dict' %
                     (os.path.abspath(updatedcp), type(updatedcp)))
            return None

    # do the update using python's dict update mechanism
    # this requires updated to be in the same checkplotdict format as current
    # all keys in current will now be from updated
    cp_current.update(cp_updated)

    # figure out the plotfpath if we haven't by now
    if not plotfpath and outgzip:
        plotfpath = 'checkplot-%s.pkl.gz' % cp_current['objectid']
    elif (not plotfpath) and (not outgzip):
        plotfpath = 'checkplot-%s.pkl' % cp_current['objectid']

    # make sure we write the correct postfix
    if plotfpath.endswith('.gz'):
        outgzip = True

    # write the new checkplotdict
    return _write_checkplot_picklefile(cp_current,
                                       outfile=plotfpath,
                                       outgzip=outgzip,
                                       protocol=pickleprotocol)
[ "def", "checkplot_pickle_update", "(", "currentcp", ",", "updatedcp", ",", "outfile", "=", "None", ",", "outgzip", "=", "False", ",", "pickleprotocol", "=", "None", ",", "verbose", "=", "True", ")", ":", "# break out python 2.7 and > 3 nonsense", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">", "(", "3", ",", "2", ")", ":", "# generate the outfile filename", "if", "not", "outfile", "and", "isinstance", "(", "currentcp", ",", "str", ")", ":", "plotfpath", "=", "currentcp", "elif", "outfile", ":", "plotfpath", "=", "outfile", "elif", "isinstance", "(", "currentcp", ",", "dict", ")", "and", "currentcp", "[", "'objectid'", "]", ":", "if", "outgzip", ":", "plotfpath", "=", "'checkplot-%s.pkl.gz'", "%", "currentcp", "[", "'objectid'", "]", "else", ":", "plotfpath", "=", "'checkplot-%s.pkl'", "%", "currentcp", "[", "'objectid'", "]", "else", ":", "# we'll get this later below", "plotfpath", "=", "None", "if", "(", "isinstance", "(", "currentcp", ",", "str", ")", "and", "os", ".", "path", ".", "exists", "(", "currentcp", ")", ")", ":", "cp_current", "=", "_read_checkplot_picklefile", "(", "currentcp", ")", "elif", "isinstance", "(", "currentcp", ",", "dict", ")", ":", "cp_current", "=", "currentcp", "else", ":", "LOGERROR", "(", "'currentcp: %s of type %s is not a '", "'valid checkplot filename (or does not exist), or a dict'", "%", "(", "os", ".", "path", ".", "abspath", "(", "currentcp", ")", ",", "type", "(", "currentcp", ")", ")", ")", "return", "None", "if", "(", "isinstance", "(", "updatedcp", ",", "str", ")", "and", "os", ".", "path", ".", "exists", "(", "updatedcp", ")", ")", ":", "cp_updated", "=", "_read_checkplot_picklefile", "(", "updatedcp", ")", "elif", "isinstance", "(", "updatedcp", ",", "dict", ")", ":", "cp_updated", "=", "updatedcp", "else", ":", "LOGERROR", "(", "'updatedcp: %s of type %s is not a '", "'valid checkplot filename (or does not exist), or a dict'", "%", "(", "os", ".", "path", ".", "abspath", "(", "updatedcp", ")", ",", "type", "(", "updatedcp", ")", ")", ")", "return", "None", "# check for unicode in python 2.7", "else", ":", "# generate the outfile filename", "if", "(", "not", "outfile", "and", "(", "isinstance", "(", "currentcp", ",", "str", ")", "or", "isinstance", "(", "currentcp", ",", "unicode", ")", ")", ")", ":", "plotfpath", "=", "currentcp", "elif", "outfile", ":", "plotfpath", "=", "outfile", "elif", "isinstance", "(", "currentcp", ",", "dict", ")", "and", "currentcp", "[", "'objectid'", "]", ":", "if", "outgzip", ":", "plotfpath", "=", "'checkplot-%s.pkl.gz'", "%", "currentcp", "[", "'objectid'", "]", "else", ":", "plotfpath", "=", "'checkplot-%s.pkl'", "%", "currentcp", "[", "'objectid'", "]", "else", ":", "# we'll get this later below", "plotfpath", "=", "None", "# get the current checkplotdict", "if", "(", "(", "isinstance", "(", "currentcp", ",", "str", ")", "or", "isinstance", "(", "currentcp", ",", "unicode", ")", ")", "and", "os", ".", "path", ".", "exists", "(", "currentcp", ")", ")", ":", "cp_current", "=", "_read_checkplot_picklefile", "(", "currentcp", ")", "elif", "isinstance", "(", "currentcp", ",", "dict", ")", ":", "cp_current", "=", "currentcp", "else", ":", "LOGERROR", "(", "'currentcp: %s of type %s is not a '", "'valid checkplot filename (or does not exist), or a dict'", "%", "(", "os", ".", "path", ".", "abspath", "(", "currentcp", ")", ",", "type", "(", "currentcp", ")", ")", ")", "return", "None", "# get the updated checkplotdict", "if", "(", "(", "isinstance", "(", "updatedcp", ",", "str", ")", "or", "isinstance", 
"(", "updatedcp", ",", "unicode", ")", ")", "and", "os", ".", "path", ".", "exists", "(", "updatedcp", ")", ")", ":", "cp_updated", "=", "_read_checkplot_picklefile", "(", "updatedcp", ")", "elif", "isinstance", "(", "updatedcp", ",", "dict", ")", ":", "cp_updated", "=", "updatedcp", "else", ":", "LOGERROR", "(", "'updatedcp: %s of type %s is not a '", "'valid checkplot filename (or does not exist), or a dict'", "%", "(", "os", ".", "path", ".", "abspath", "(", "updatedcp", ")", ",", "type", "(", "updatedcp", ")", ")", ")", "return", "None", "# do the update using python's dict update mechanism", "# this requires updated to be in the same checkplotdict format as current", "# all keys in current will now be from updated", "cp_current", ".", "update", "(", "cp_updated", ")", "# figure out the plotfpath if we haven't by now", "if", "not", "plotfpath", "and", "outgzip", ":", "plotfpath", "=", "'checkplot-%s.pkl.gz'", "%", "cp_current", "[", "'objectid'", "]", "elif", "(", "not", "plotfpath", ")", "and", "(", "not", "outgzip", ")", ":", "plotfpath", "=", "'checkplot-%s.pkl'", "%", "cp_current", "[", "'objectid'", "]", "# make sure we write the correct postfix", "if", "plotfpath", ".", "endswith", "(", "'.gz'", ")", ":", "outgzip", "=", "True", "# write the new checkplotdict", "return", "_write_checkplot_picklefile", "(", "cp_current", ",", "outfile", "=", "plotfpath", ",", "outgzip", "=", "outgzip", ",", "protocol", "=", "pickleprotocol", ")" ]
This updates the current checkplotdict with updated values provided. Parameters ---------- currentcp : dict or str This is either a checkplotdict produced by `checkplot_pickle` above or a checkplot pickle file produced by the same function. This checkplot will be updated from the `updatedcp` checkplot. updatedcp : dict or str This is either a checkplotdict produced by `checkplot_pickle` above or a checkplot pickle file produced by the same function. This checkplot will be the source of the update to the `currentcp` checkplot. outfile : str or None The name of the output checkplot pickle file. The function will output the new checkplot gzipped pickle file to `outfile` if outfile is a filename. If `currentcp` is a file and `outfile`, this will be set to that filename, so the function updates it in place. outgzip : bool This controls whether to gzip the output pickle. It turns out that this is the slowest bit in the output process, so if you're after speed, best not to use this. This is False by default since it turns out that gzip actually doesn't save that much space (29 MB vs. 35 MB for the average checkplot pickle). pickleprotocol : int or None This sets the pickle file protocol to use when writing the pickle: If None, will choose a protocol using the following rules: - 4 -> default in Python >= 3.4 - fast but incompatible with Python 2 - 3 -> default in Python 3.0-3.3 - mildly fast - 2 -> default in Python 2 - very slow, but compatible with Python 2/3 The default protocol kwarg is None, this will make an automatic choice for pickle protocol that's best suited for the version of Python in use. Note that this will make pickles generated by Py3 incompatible with Py2. verbose : bool If True, will indicate progress and warn about problems. Returns ------- str The path to the updated checkplot pickle file. If `outfile` was None and `currentcp` was a filename, this will return `currentcp` to indicate that the checkplot pickle file was updated in place.
[ "This", "updates", "the", "current", "checkplotdict", "with", "updated", "values", "provided", "." ]
python
valid
Zsailer/kubeconf
kubeconf/kubeconf.py
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L259-L265
def remove_from_user(self, name, *args):
    """Remove attributes from a user.
    """
    user = self.get_user(name=name)
    attrs_ = user['user']
    for a in args:
        del attrs_[a]
[ "def", "remove_from_user", "(", "self", ",", "name", ",", "*", "args", ")", ":", "user", "=", "self", ".", "get_user", "(", "name", "=", "name", ")", "attrs_", "=", "user", "[", "'user'", "]", "for", "a", "in", "args", ":", "del", "attrs_", "[", "a", "]" ]
Remove attributes from a user.
[ "Remove", "attributes", "from", "a", "user", "." ]
python
train
hobson/pug-invest
pug/invest/models.py
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/models.py#L37-L76
def get_dataframes(symbols=("sne", "goog", "tsla"), source='yahoo', refresh=False):
    """Retreive table of market data ("Close", "Volume", "Adj Close") for each symbol requested

    >>> dfdict = get_dataframes('GOOG', 'SNE')
    """
    symbols = util.make_symbols(list(symbols))
    if refresh:
        symbols_to_refresh = symbols
    else:
        symbols_to_refresh = [sym for sym in symbols if not Equity.objects.filter(symbol=sym).exists()]
    source = source.lower().strip()
    if source in ('yahoo', 'google'):
        source += '_finance'
    if source[:3] == 'fed':
        source = 'federal_reserve_economic_data'
    ccpanda = ccp.ConcurrentPandas()
    # set the data source
    getattr(ccpanda, "set_source_" + source)()
    if symbols_to_refresh:
        # tell concurrent pandas which keys/symbols to retrieve
        ccpanda.insert_keys(symbols_to_refresh)
        # start concurrentpandas threads
        ccpanda.consume_keys_asynchronous_threads()
        # FIXME: is there a better/faster iterator to use like `ccpanda.output_map` attribute?
        pseudodict = ccpanda.return_map()
    else:
        pseudodict = {}
    table = {}
    for sym in symbols:
        e, created = None, False
        if not sym in symbols_to_refresh:
            e, created = Equity.objects.get_or_create(symbol=sym)
        if created or not e or not e.time_series or sym in symbols_to_refresh:
            e, created = Equity.objects.get_or_create(
                symbol=sym,
                name=sym,  # FIXME: use data source to find equity name!
                time_series=pseudodict[sym].to_json(),
                )
        table[sym] = pd.io.json.read_json(path_or_buf=e.time_series, orient='columns', typ='frame', convert_dates=True)
    return table
[ "def", "get_dataframes", "(", "symbols", "=", "(", "\"sne\"", ",", "\"goog\"", ",", "\"tsla\"", ")", ",", "source", "=", "'yahoo'", ",", "refresh", "=", "False", ")", ":", "symbols", "=", "util", ".", "make_symbols", "(", "list", "(", "symbols", ")", ")", "if", "refresh", ":", "symbols_to_refresh", "=", "symbols", "else", ":", "symbols_to_refresh", "=", "[", "sym", "for", "sym", "in", "symbols", "if", "not", "Equity", ".", "objects", ".", "filter", "(", "symbol", "=", "sym", ")", ".", "exists", "(", ")", "]", "source", "=", "source", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "source", "in", "(", "'yahoo'", ",", "'google'", ")", ":", "source", "+=", "'_finance'", "if", "source", "[", ":", "3", "]", "==", "'fed'", ":", "source", "=", "'federal_reserve_economic_data'", "ccpanda", "=", "ccp", ".", "ConcurrentPandas", "(", ")", "# set the data source", "getattr", "(", "ccpanda", ",", "\"set_source_\"", "+", "source", ")", "(", ")", "if", "symbols_to_refresh", ":", "# tell concurrent pandas which keys/symbols to retrieve", "ccpanda", ".", "insert_keys", "(", "symbols_to_refresh", ")", "# start concurrentpandas threads", "ccpanda", ".", "consume_keys_asynchronous_threads", "(", ")", "# FIXME: is there a better/faster iterator to use like `ccpanda.output_map` attribute?", "pseudodict", "=", "ccpanda", ".", "return_map", "(", ")", "else", ":", "pseudodict", "=", "{", "}", "table", "=", "{", "}", "for", "sym", "in", "symbols", ":", "e", ",", "created", "=", "None", ",", "False", "if", "not", "sym", "in", "symbols_to_refresh", ":", "e", ",", "created", "=", "Equity", ".", "objects", ".", "get_or_create", "(", "symbol", "=", "sym", ")", "if", "created", "or", "not", "e", "or", "not", "e", ".", "time_series", "or", "sym", "in", "symbols_to_refresh", ":", "e", ",", "created", "=", "Equity", ".", "objects", ".", "get_or_create", "(", "symbol", "=", "sym", ",", "name", "=", "sym", ",", "# FIXME: use data source to find equity name!", "time_series", "=", "pseudodict", "[", "sym", "]", ".", "to_json", "(", ")", ",", ")", "table", "[", "sym", "]", "=", "pd", ".", "io", ".", "json", ".", "read_json", "(", "path_or_buf", "=", "e", ".", "time_series", ",", "orient", "=", "'columns'", ",", "typ", "=", "'frame'", ",", "convert_dates", "=", "True", ")", "return", "table" ]
Retreive table of market data ("Close", "Volume", "Adj Close") for each symbol requested >>> dfdict = get_dataframes('GOOG', 'SNE')
[ "Retreive", "table", "of", "market", "data", "(", "Close", "Volume", "Adj", "Close", ")", "for", "each", "symbol", "requested" ]
python
train
CalebBell/thermo
thermo/mixture.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/mixture.py#L2230-L2244
def sigma(self):
    r'''Surface tension of the mixture at its current temperature and
    composition, in units of [N/m].

    For calculation of this property at other temperatures, or specifying
    manually the method used to calculate it, and more - see the object
    oriented interface :obj:`thermo.interface.SurfaceTensionMixture`;
    each Mixture instance creates one to actually perform the calculations.

    Examples
    --------
    >>> Mixture(['water'], ws=[1], T=300, P=1E5).sigma
    0.07176932405246211
    '''
    return self.SurfaceTensionMixture(self.T, self.P, self.zs, self.ws)
[ "def", "sigma", "(", "self", ")", ":", "return", "self", ".", "SurfaceTensionMixture", "(", "self", ".", "T", ",", "self", ".", "P", ",", "self", ".", "zs", ",", "self", ".", "ws", ")" ]
r'''Surface tension of the mixture at its current temperature and composition, in units of [N/m]. For calculation of this property at other temperatures, or specifying manually the method used to calculate it, and more - see the object oriented interface :obj:`thermo.interface.SurfaceTensionMixture`; each Mixture instance creates one to actually perform the calculations. Examples -------- >>> Mixture(['water'], ws=[1], T=300, P=1E5).sigma 0.07176932405246211
[ "r", "Surface", "tension", "of", "the", "mixture", "at", "its", "current", "temperature", "and", "composition", "in", "units", "of", "[", "N", "/", "m", "]", "." ]
python
valid
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L746-L779
def _bindDomain(self, domain_name, create=False, block=True):
    """
    Return the Boto Domain object representing the SDB domain of the given name. If the
    domain does not exist and `create` is True, it will be created.

    :param str domain_name: the name of the domain to bind to

    :param bool create: True if domain should be created if it doesn't exist

    :param bool block: If False, return None if the domain doesn't exist. If True, wait until
           domain appears. This parameter is ignored if create is True.

    :rtype: Domain|None
    :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the
            retry timeout expires.
    """
    log.debug("Binding to job store domain '%s'.", domain_name)
    retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e))
    if not block:
        retryargs['timeout'] = 15
    for attempt in retry_sdb(**retryargs):
        with attempt:
            try:
                return self.db.get_domain(domain_name)
            except SDBResponseError as e:
                if no_such_sdb_domain(e):
                    if create:
                        return self.db.create_domain(domain_name)
                    elif block:
                        raise
                    else:
                        return None
                else:
                    raise
[ "def", "_bindDomain", "(", "self", ",", "domain_name", ",", "create", "=", "False", ",", "block", "=", "True", ")", ":", "log", ".", "debug", "(", "\"Binding to job store domain '%s'.\"", ",", "domain_name", ")", "retryargs", "=", "dict", "(", "predicate", "=", "lambda", "e", ":", "no_such_sdb_domain", "(", "e", ")", "or", "sdb_unavailable", "(", "e", ")", ")", "if", "not", "block", ":", "retryargs", "[", "'timeout'", "]", "=", "15", "for", "attempt", "in", "retry_sdb", "(", "*", "*", "retryargs", ")", ":", "with", "attempt", ":", "try", ":", "return", "self", ".", "db", ".", "get_domain", "(", "domain_name", ")", "except", "SDBResponseError", "as", "e", ":", "if", "no_such_sdb_domain", "(", "e", ")", ":", "if", "create", ":", "return", "self", ".", "db", ".", "create_domain", "(", "domain_name", ")", "elif", "block", ":", "raise", "else", ":", "return", "None", "else", ":", "raise" ]
Return the Boto Domain object representing the SDB domain of the given name. If the domain does not exist and `create` is True, it will be created. :param str domain_name: the name of the domain to bind to :param bool create: True if domain should be created if it doesn't exist :param bool block: If False, return None if the domain doesn't exist. If True, wait until domain appears. This parameter is ignored if create is True. :rtype: Domain|None :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the retry timeout expires.
[ "Return", "the", "Boto", "Domain", "object", "representing", "the", "SDB", "domain", "of", "the", "given", "name", ".", "If", "the", "domain", "does", "not", "exist", "and", "create", "is", "True", "it", "will", "be", "created", "." ]
python
train
mitsei/dlkit
dlkit/handcar/osid/queries.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/queries.py#L71-L75
def _add_match(self, match_key, match_value):
    """Adds a match key/value"""
    if match_key is None:
        raise errors.NullArgument()
    self._query_terms[match_key] = str(match_key) + '=' + str(match_value)
[ "def", "_add_match", "(", "self", ",", "match_key", ",", "match_value", ")", ":", "if", "match_key", "is", "None", ":", "raise", "errors", ".", "NullArgument", "(", ")", "self", ".", "_query_terms", "[", "match_key", "]", "=", "str", "(", "match_key", ")", "+", "'='", "+", "str", "(", "match_value", ")" ]
Adds a match key/value
[ "Adds", "a", "match", "key", "/", "value" ]
python
train
kronenthaler/mod-pbxproj
pbxproj/pbxextensions/ProjectFlags.py
https://github.com/kronenthaler/mod-pbxproj/blob/8de3cbdd3210480ddbb1fa0f50a4f4ea87de6e71/pbxproj/pbxextensions/ProjectFlags.py#L79-L87
def remove_other_ldflags(self, flags, target_name=None, configuration_name=None):
    """
    Removes the given flags from the OTHER_LDFLAGS section of the target on the configurations
    :param flags: A string or array of strings. If none, removes all values from the flag.
    :param target_name: Target name or list of target names to remove the flag from or None for every target
    :param configuration_name: Configuration name to add the flag to or None for every configuration
    :return: void
    """
    self.remove_flags(XCBuildConfigurationFlags.OTHER_LDFLAGS, flags, target_name, configuration_name)
[ "def", "remove_other_ldflags", "(", "self", ",", "flags", ",", "target_name", "=", "None", ",", "configuration_name", "=", "None", ")", ":", "self", ".", "remove_flags", "(", "XCBuildConfigurationFlags", ".", "OTHER_LDFLAGS", ",", "flags", ",", "target_name", ",", "configuration_name", ")" ]
Removes the given flags from the OTHER_LDFLAGS section of the target on the configurations :param flags: A string or array of strings. If none, removes all values from the flag. :param target_name: Target name or list of target names to remove the flag from or None for every target :param configuration_name: Configuration name to add the flag to or None for every configuration :return: void
[ "Removes", "the", "given", "flags", "from", "the", "OTHER_LDFLAGS", "section", "of", "the", "target", "on", "the", "configurations", ":", "param", "flags", ":", "A", "string", "or", "array", "of", "strings", ".", "If", "none", "removes", "all", "values", "from", "the", "flag", ".", ":", "param", "target_name", ":", "Target", "name", "or", "list", "of", "target", "names", "to", "remove", "the", "flag", "from", "or", "None", "for", "every", "target", ":", "param", "configuration_name", ":", "Configuration", "name", "to", "add", "the", "flag", "to", "or", "None", "for", "every", "configuration", ":", "return", ":", "void" ]
python
train
adamrehn/ue4cli
ue4cli/CachedDataManager.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/CachedDataManager.py#L27-L32
def setCachedDataKey(engineVersionHash, key, value):
    """
    Sets the cached data value for the specified engine version hash and dictionary key
    """
    cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
    return JsonDataManager(cacheFile).setKey(key, value)
[ "def", "setCachedDataKey", "(", "engineVersionHash", ",", "key", ",", "value", ")", ":", "cacheFile", "=", "CachedDataManager", ".", "_cacheFileForHash", "(", "engineVersionHash", ")", "return", "JsonDataManager", "(", "cacheFile", ")", ".", "setKey", "(", "key", ",", "value", ")" ]
Sets the cached data value for the specified engine version hash and dictionary key
[ "Sets", "the", "cached", "data", "value", "for", "the", "specified", "engine", "version", "hash", "and", "dictionary", "key" ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/handlers/basehandler.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/basehandler.py#L107-L116
def make_error_response(self, message):
    """
    Makes the python dict corresponding to the
    JSON that needs to be sent for a failed
    response. Message is the message that is
    sent as the reason for failure.
    """
    response = self.make_response(constants.RESPONSE_STATUS_FAILURE)
    response[constants.RESPONSE_KEY_MESSAGE] = message
    return response
[ "def", "make_error_response", "(", "self", ",", "message", ")", ":", "response", "=", "self", ".", "make_response", "(", "constants", ".", "RESPONSE_STATUS_FAILURE", ")", "response", "[", "constants", ".", "RESPONSE_KEY_MESSAGE", "]", "=", "message", "return", "response" ]
Makes the python dict corresponding to the JSON that needs to be sent for a failed response. Message is the message that is sent as the reason for failure.
[ "Makes", "the", "python", "dict", "corresponding", "to", "the", "JSON", "that", "needs", "to", "be", "sent", "for", "a", "failed", "response", ".", "Message", "is", "the", "message", "that", "is", "sent", "as", "the", "reason", "for", "failure", "." ]
python
valid
apache/spark
python/pyspark/sql/functions.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L580-L595
def rand(seed=None):
    """Generates a random column with independent and identically distributed (i.i.d.) samples
    from U[0.0, 1.0].

    .. note:: The function is non-deterministic in general case.

    >>> df.withColumn('rand', rand(seed=42) * 3).collect()
    [Row(age=2, name=u'Alice', rand=2.4052597283576684),
     Row(age=5, name=u'Bob', rand=2.3913904055683974)]
    """
    sc = SparkContext._active_spark_context
    if seed is not None:
        jc = sc._jvm.functions.rand(seed)
    else:
        jc = sc._jvm.functions.rand()
    return Column(jc)
[ "def", "rand", "(", "seed", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "seed", "is", "not", "None", ":", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "rand", "(", "seed", ")", "else", ":", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "rand", "(", ")", "return", "Column", "(", "jc", ")" ]
Generates a random column with independent and identically distributed (i.i.d.) samples from U[0.0, 1.0]. .. note:: The function is non-deterministic in general case. >>> df.withColumn('rand', rand(seed=42) * 3).collect() [Row(age=2, name=u'Alice', rand=2.4052597283576684), Row(age=5, name=u'Bob', rand=2.3913904055683974)]
[ "Generates", "a", "random", "column", "with", "independent", "and", "identically", "distributed", "(", "i", ".", "i", ".", "d", ".", ")", "samples", "from", "U", "[", "0", ".", "0", "1", ".", "0", "]", "." ]
python
train